[Orca-checkins] rev 116 - in trunk/orca: . orcallator config lib src docs packages packages/Storable-0.6.5 packages/Storable-0.6.5/t packages/Data-Dumper-2.101 packages/Data-Dumper-2.101/t packages/rrdtool-1.0.7.2 packages/rrdtool-1.0.7.2/src packages/rrdtool-1.0.7.2/cgilib-0.4 packages/rrdtool-1.0.7.2/zlib-1.1.3 packages/rrdtool-1.0.7.2/contrib packages/rrdtool-1.0.7.2/contrib/rrd-file-icon packages/rrdtool-1.0.7.2/contrib/log2rrd packages/rrdtool-1.0.7.2/contrib/trytime packages/rrdtool-1.0.7.2/perl-piped packages/rrdtool-1.0.7.2/config packages/rrdtool-1.0.7.2/config/libtool packages/rrdtool-1.0.7.2/doc packages/rrdtool-1.0.7.2/perl-shared packages/rrdtool-1.0.7.2/perl-shared/t packages/rrdtool-1.0.7.2/libpng-1.0.3 packages/rrdtool-1.0.7.2/gd1.3 packages/rrdtool-1.0.7.2/examples packages/Digest-MD5-2.09 packages/Digest-MD5-2.09/MD2 packages/Digest-MD5-2.09/MD2/t packages/Digest-MD5-2.09/t packages/Digest-MD5-2.09/lib packages/Digest-MD5-2.09/SHA1 packages/Digest-MD5-2.09/SHA1/t packages/Math-Interpolate-1.05 packages/Math-Interpolate-1.05/t packages/Math-Interpolate-1.05/lib/Math
blair at orcaware.com
Sat Jul 13 19:25:05 PDT 2002
Author: blair
Date: Fri, 28 Jun 2002 22:04:28 -0700
New Revision: 116
Added:
trunk/orca/config/aclocal.m4
trunk/orca/docs/FAQ
trunk/orca/docs/manual.html
trunk/orca/orcallator/Makefile.in
trunk/orca/packages/Data-Dumper-2.101/
trunk/orca/packages/Data-Dumper-2.101/Changes
trunk/orca/packages/Data-Dumper-2.101/Dumper.html
trunk/orca/packages/Data-Dumper-2.101/Dumper.pm
trunk/orca/packages/Data-Dumper-2.101/Dumper.pm.NOXSUB
trunk/orca/packages/Data-Dumper-2.101/Dumper.xs
trunk/orca/packages/Data-Dumper-2.101/MANIFEST
trunk/orca/packages/Data-Dumper-2.101/MANIFEST.NOXSUB
trunk/orca/packages/Data-Dumper-2.101/Makefile.PL
trunk/orca/packages/Data-Dumper-2.101/README
trunk/orca/packages/Data-Dumper-2.101/Todo
trunk/orca/packages/Data-Dumper-2.101/t/
trunk/orca/packages/Data-Dumper-2.101/t/dumper.t
trunk/orca/packages/Data-Dumper-2.101/t/overload.t
trunk/orca/packages/Digest-MD5-2.09/MD2/t/badfile.t
trunk/orca/packages/Digest-MD5-2.09/SHA1/fip180-1.gif
trunk/orca/packages/Digest-MD5-2.09/SHA1/fip180-1.html
trunk/orca/packages/Digest-MD5-2.09/SHA1/t/badfile.t
trunk/orca/packages/Digest-MD5-2.09/t/badfile.t
trunk/orca/packages/rrdtool-1.0.7.2/Makefile.am
trunk/orca/packages/rrdtool-1.0.7.2/NT-BUILD-TIPS.txt
trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/
trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/Makefile.am
trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/Makefile.in
trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgi.5
trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgi.c
trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgi.h
trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgiDebug.3
trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgiGetValue.3
trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgiHeader.3
trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgiInit.3
trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgiRedirect.3
trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgilib.dsp
trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgilib.dsw
trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgitest.c
trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/jumpto.c
trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/readme
trunk/orca/packages/rrdtool-1.0.7.2/config/Makefile.am
trunk/orca/packages/rrdtool-1.0.7.2/config/Makefile.in
trunk/orca/packages/rrdtool-1.0.7.2/config/acconfig.h
trunk/orca/packages/rrdtool-1.0.7.2/config/acinclude.m4
trunk/orca/packages/rrdtool-1.0.7.2/config/aclocal.m4
trunk/orca/packages/rrdtool-1.0.7.2/config/config.guess
trunk/orca/packages/rrdtool-1.0.7.2/config/config.sub
trunk/orca/packages/rrdtool-1.0.7.2/config/libtool/
trunk/orca/packages/rrdtool-1.0.7.2/config/libtool/libtool.m4
trunk/orca/packages/rrdtool-1.0.7.2/config/ltconfig
trunk/orca/packages/rrdtool-1.0.7.2/config/ltmain.sh
trunk/orca/packages/rrdtool-1.0.7.2/config/missing
trunk/orca/packages/rrdtool-1.0.7.2/config/mkinstalldirs
trunk/orca/packages/rrdtool-1.0.7.2/config/stamp-h.in
trunk/orca/packages/rrdtool-1.0.7.2/contrib/Makefile.am
trunk/orca/packages/rrdtool-1.0.7.2/contrib/Makefile.in
trunk/orca/packages/rrdtool-1.0.7.2/contrib/log2rrd/Makefile.am
trunk/orca/packages/rrdtool-1.0.7.2/contrib/log2rrd/Makefile.in
trunk/orca/packages/rrdtool-1.0.7.2/contrib/rrd-file-icon/
trunk/orca/packages/rrdtool-1.0.7.2/contrib/rrd-file-icon/Makefile.am
trunk/orca/packages/rrdtool-1.0.7.2/contrib/rrd-file-icon/Makefile.in
trunk/orca/packages/rrdtool-1.0.7.2/contrib/rrd-file-icon/README
trunk/orca/packages/rrdtool-1.0.7.2/contrib/rrd-file-icon/rrd.png
trunk/orca/packages/rrdtool-1.0.7.2/contrib/trytime/Makefile.am
trunk/orca/packages/rrdtool-1.0.7.2/contrib/trytime/Makefile.in
trunk/orca/packages/rrdtool-1.0.7.2/doc/Makefile.am
trunk/orca/packages/rrdtool-1.0.7.2/doc/Makefile.in
trunk/orca/packages/rrdtool-1.0.7.2/doc/RRDp.txt
trunk/orca/packages/rrdtool-1.0.7.2/doc/RRDs.txt
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdcgi.html
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdcgi.pod
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdcgi.txt
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdrestore.html
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdrestore.pod
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdrestore.txt
trunk/orca/packages/rrdtool-1.0.7.2/examples/
trunk/orca/packages/rrdtool-1.0.7.2/examples/Makefile.am
trunk/orca/packages/rrdtool-1.0.7.2/examples/Makefile.in
trunk/orca/packages/rrdtool-1.0.7.2/examples/bigtops.pl.in
trunk/orca/packages/rrdtool-1.0.7.2/examples/cgi-demo.cgi.in
trunk/orca/packages/rrdtool-1.0.7.2/examples/minmax.pl.in
trunk/orca/packages/rrdtool-1.0.7.2/examples/piped-demo.pl.in
trunk/orca/packages/rrdtool-1.0.7.2/examples/shared-demo.pl.in
trunk/orca/packages/rrdtool-1.0.7.2/examples/stripes.pl.in
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/Makefile.am
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/README.rrdtool
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gdlucidab10.c
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gdlucidab10.h
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gdlucidab12.c
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gdlucidab12.h
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gdlucidab14.c
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gdlucidab14.h
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gdlucidan10.c
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gdlucidan10.h
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gdlucidan12.c
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gdlucidan12.h
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gdlucidan14.c
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gdlucidan14.h
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/readme.txt
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/ANNOUNCE
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/CHANGES
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/INSTALL
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/KNOWNBUG
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/Makefile.am
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/Makefile.in
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/README
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/README.rrdtool
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/TODO
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/Y2KINFO
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/ansi2knr.1
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/ansi2knr.c
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/example.c
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/libpng.3
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/libpng.txt
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/libpngpf.3
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/png.5
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/png.c
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/png.dsp
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/png.dsw
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/png.h
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/pngconf.h
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/pngerror.c
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/pngget.c
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/pngmem.c
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/pngpread.c
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/pngread.c
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/pngrio.c
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/pngrtran.c
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/pngrutil.c
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/pngset.c
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/pngtrans.c
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/pngwio.c
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/pngwrite.c
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/pngwtran.c
trunk/orca/packages/rrdtool-1.0.7.2/libpng-1.0.3/pngwutil.c
trunk/orca/packages/rrdtool-1.0.7.2/perl-piped/rrdpl.dsp
trunk/orca/packages/rrdtool-1.0.7.2/perl-piped/rrdpl.dsw
trunk/orca/packages/rrdtool-1.0.7.2/perl-shared/ntmake.pl
trunk/orca/packages/rrdtool-1.0.7.2/perl-shared/rrdpl.dsp
trunk/orca/packages/rrdtool-1.0.7.2/perl-shared/rrdpl.dsw
trunk/orca/packages/rrdtool-1.0.7.2/src/Makefile.am
trunk/orca/packages/rrdtool-1.0.7.2/src/gdpng.c
trunk/orca/packages/rrdtool-1.0.7.2/src/gifsize.c
trunk/orca/packages/rrdtool-1.0.7.2/src/pngsize.c
trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_cgi.c
trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_restore.c
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/ChangeLog
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/FAQ
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/INDEX
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/Makefile.am
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/Makefile.in
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/README
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/README.rrdtool
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/adler32.c
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/algorithm.txt
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/compress.c
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/configure
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/crc32.c
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/deflate.c
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/deflate.h
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/gzio.c
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/infblock.c
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/infblock.h
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/infcodes.c
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/infcodes.h
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/inffast.c
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/inffast.h
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/inffixed.h
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/inflate.c
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/inftrees.c
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/inftrees.h
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/infutil.c
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/infutil.h
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/trees.c
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/trees.h
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/uncompr.c
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/zconf.h
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/zlib.3
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/zlib.dsp
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/zlib.dsw
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/zlib.h
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/zutil.c
trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/zutil.h
Removed:
trunk/orca/packages/rrdtool-1.0.7.2/MANIFEST
trunk/orca/packages/rrdtool-1.0.7.2/Makefile.dist
trunk/orca/packages/rrdtool-1.0.7.2/contrib/trytime/Makefile
trunk/orca/packages/rrdtool-1.0.7.2/doc/GNUmakefile.in
trunk/orca/packages/rrdtool-1.0.7.2/doc/RRDp.3
trunk/orca/packages/rrdtool-1.0.7.2/doc/RRDs.3
trunk/orca/packages/rrdtool-1.0.7.2/doc/bin_dec_hex.1
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdcreate.1
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrddump.1
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdfetch.1
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdgraph.1
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdlast.1
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdresize.1
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdtool.1
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdtune.1
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdtutorial.1
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdupdate.1
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/README
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gddemo.c
trunk/orca/packages/rrdtool-1.0.7.2/perl-piped/examples/
trunk/orca/packages/rrdtool-1.0.7.2/perl-shared/examples/
trunk/orca/packages/rrdtool-1.0.7.2/src/rrd.dsp
trunk/orca/packages/rrdtool-1.0.7.2/src/rrd.dsw
Modified:
trunk/orca/CHANGES
trunk/orca/INSTALL
trunk/orca/Makefile.in
trunk/orca/NEWS
trunk/orca/README
trunk/orca/TODO
trunk/orca/config/PerlHead1.in
trunk/orca/config/PerlHead2.in
trunk/orca/configure
trunk/orca/configure.in
trunk/orca/lib/Makefile.in
trunk/orca/orcallator/orcallator.cfg.in
trunk/orca/orcallator/orcallator.se
trunk/orca/orcallator/start_orcallator.sh.in
trunk/orca/packages/Digest-MD5-2.09/Changes
trunk/orca/packages/Digest-MD5-2.09/MANIFEST
trunk/orca/packages/Digest-MD5-2.09/MD2/MD2.xs
trunk/orca/packages/Digest-MD5-2.09/MD5.pm
trunk/orca/packages/Digest-MD5-2.09/MD5.xs
trunk/orca/packages/Digest-MD5-2.09/README
trunk/orca/packages/Digest-MD5-2.09/SHA1/SHA1.pm
trunk/orca/packages/Digest-MD5-2.09/SHA1/SHA1.xs
trunk/orca/packages/Digest-MD5-2.09/lib/Digest.pm
trunk/orca/packages/Digest-MD5-2.09/lib/MD5.pm
trunk/orca/packages/Digest-MD5-2.09/t/files.t
trunk/orca/packages/Makefile.in
trunk/orca/packages/Math-Interpolate-1.05/CHANGES
trunk/orca/packages/Math-Interpolate-1.05/README
trunk/orca/packages/Math-Interpolate-1.05/lib/Math/Interpolate.pm
trunk/orca/packages/Math-Interpolate-1.05/lib/Math/IntervalSearch.pm
trunk/orca/packages/Math-Interpolate-1.05/t/01interval.t
trunk/orca/packages/Storable-0.6.5/ChangeLog
trunk/orca/packages/Storable-0.6.5/Makefile.PL
trunk/orca/packages/Storable-0.6.5/README
trunk/orca/packages/Storable-0.6.5/Storable.pm
trunk/orca/packages/Storable-0.6.5/Storable.xs
trunk/orca/packages/Storable-0.6.5/patchlevel.h
trunk/orca/packages/Storable-0.6.5/t/canonical.t
trunk/orca/packages/Storable-0.6.5/t/dclone.t
trunk/orca/packages/Storable-0.6.5/t/dump.pl
trunk/orca/packages/Storable-0.6.5/t/forgive.t
trunk/orca/packages/Storable-0.6.5/t/freeze.t
trunk/orca/packages/Storable-0.6.5/t/retrieve.t
trunk/orca/packages/Storable-0.6.5/t/store.t
trunk/orca/packages/Storable-0.6.5/t/tied.t
trunk/orca/packages/rrdtool-1.0.7.2/CHANGES
trunk/orca/packages/rrdtool-1.0.7.2/CONTRIBUTORS
trunk/orca/packages/rrdtool-1.0.7.2/COPYING
trunk/orca/packages/rrdtool-1.0.7.2/Makefile.in
trunk/orca/packages/rrdtool-1.0.7.2/README
trunk/orca/packages/rrdtool-1.0.7.2/TODO
trunk/orca/packages/rrdtool-1.0.7.2/config/config.h.in
trunk/orca/packages/rrdtool-1.0.7.2/config/install-sh
trunk/orca/packages/rrdtool-1.0.7.2/configure
trunk/orca/packages/rrdtool-1.0.7.2/configure.in
trunk/orca/packages/rrdtool-1.0.7.2/contrib/README
trunk/orca/packages/rrdtool-1.0.7.2/contrib/log2rrd/log2rrd.pl.in
trunk/orca/packages/rrdtool-1.0.7.2/contrib/trytime/trytime.c
trunk/orca/packages/rrdtool-1.0.7.2/doc/RRDs.html
trunk/orca/packages/rrdtool-1.0.7.2/doc/bin_dec_hex.html
trunk/orca/packages/rrdtool-1.0.7.2/doc/bin_dec_hex.pod
trunk/orca/packages/rrdtool-1.0.7.2/doc/bin_dec_hex.txt
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdcreate.html
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdcreate.pod
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdcreate.txt
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrddump.html
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrddump.pod
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrddump.txt
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdfetch.html
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdfetch.pod
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdfetch.txt
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdgraph.html
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdgraph.pod
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdgraph.txt
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdlast.txt
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdresize.txt
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdtool.html
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdtool.pod
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdtool.txt
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdtune.txt
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdtutorial.html
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdtutorial.pod
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdtutorial.txt
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdupdate.html
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdupdate.pod
trunk/orca/packages/rrdtool-1.0.7.2/doc/rrdupdate.txt
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/Makefile.in
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gd.c
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gd.dsp
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gd.h
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gdfontg.c
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gdfontg.h
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gdfontl.c
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gdfontl.h
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gdfontmb.c
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gdfontmb.h
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gdfonts.c
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gdfonts.h
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gdfontt.c
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/gdfontt.h
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/index.html
trunk/orca/packages/rrdtool-1.0.7.2/gd1.3/webgif.c
trunk/orca/packages/rrdtool-1.0.7.2/perl-piped/MANIFEST
trunk/orca/packages/rrdtool-1.0.7.2/perl-piped/RRDp.pm
trunk/orca/packages/rrdtool-1.0.7.2/perl-shared/Makefile.PL
trunk/orca/packages/rrdtool-1.0.7.2/perl-shared/RRDs.pm
trunk/orca/packages/rrdtool-1.0.7.2/perl-shared/RRDs.xs
trunk/orca/packages/rrdtool-1.0.7.2/perl-shared/t/base.t
trunk/orca/packages/rrdtool-1.0.7.2/src/Makefile.in
trunk/orca/packages/rrdtool-1.0.7.2/src/getopt.c
trunk/orca/packages/rrdtool-1.0.7.2/src/parsetime.c
trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_create.c
trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_diff.c
trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_dump.c
trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_error.c
trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_fetch.c
trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_format.c
trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_format.h
trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_graph.c
trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_last.c
trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_open.c
trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_resize.c
trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_tool.c
trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_tool.h
trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_tune.c
trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_update.c
trunk/orca/packages/rrdtool-1.0.7.2/src/rrdtool.dsp
trunk/orca/packages/rrdtool-1.0.7.2/src/rrdtool.dsw
trunk/orca/src/Makefile.in
trunk/orca/src/orca.pl.in
trunk/orca/src/upgrade_installation.pl
Log:
Load orca-0.24 into trunk/orca.
Modified: trunk/orca/configure
==============================================================================
--- trunk/orca/configure (original)
+++ trunk/orca/configure Sat Jul 13 19:22:22 2002
@@ -463,7 +463,7 @@
# A filename unique to this package, relative to the directory that
# configure is in, which we can look for to find out if srcdir is correct.
-ac_unique_file=src/orca.pl
+ac_unique_file=src/orca.pl.in
# Find the source files, if location was not specified.
if test -z "$srcdir"; then
@@ -648,11 +648,16 @@
# Define the directories containing packages that Orca makes use of here.
-# The path packages gets added where necessary.
-DIGEST_MD5_DIR=Digest-MD5-2.07
-MATH_INTERPOLATE_DIR=Math-Interpolate-1.02
-RRDTOOL_DIR=rrdtool-0.99.31
-STORABLE_DIR=Storable-0.6@3
+# The directory name packages where these packages are distributed with
+# Orca gets added where necessary.
+COMPRESS_ZLIB_DIR=Compress-Zlib-1.05
+DATA_DUMPER_DIR=Data-Dumper-2.101
+DIGEST_MD5_DIR=Digest-MD5-2.09
+MATH_INTERPOLATE_DIR=Math-Interpolate-1.05
+RRDTOOL_DIR=rrdtool-1.0.7.2
+STORABLE_DIR=Storable-0.6.5
+
+
@@ -730,7 +735,7 @@
)`
-WATCH_HTTPD=
+WATCH_WEB=
NCSA_LOG=
# Check whether --with-ncsa-log or --without-ncsa-log was given.
if test "${with_ncsa_log+set}" = set; then
@@ -742,7 +747,7 @@
;;
*) WEB_LOG="$withval"
NCSA_LOG=yes
- WATCH_HTTPD="-DWATCH_HTTPD"
+ WATCH_WEB="-DWATCH_WEB"
;;
esac
@@ -764,7 +769,7 @@
;;
*) WEB_LOG="$withval"
PROXY_LOG=yes
- WATCH_HTTPD="-DWATCH_HTTPD -DWATCH_PROXY"
+ WATCH_WEB="-DWATCH_WEB -DWATCH_PROXY"
;;
esac
@@ -787,7 +792,7 @@
{ echo "configure: error: *** You must supply an argument to the --with-squid-log option." 1>&2; exit 1; }
;;
*) WEB_LOG="$withval"
- WATCH_HTTPD="-DWATCH_HTTPD -DWATCH_SQUID"
+ WATCH_WEB="-DWATCH_WEB -DWATCH_SQUID"
;;
esac
@@ -797,13 +802,12 @@
-
# To get a default CFLAGS for this build, check for a C compiler. This
# is also needed to be ready to compile any Perl modules.
# Extract the first word of "gcc", so it can be a program name with args.
set dummy gcc; ac_word=$2
echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:807: checking for $ac_word" >&5
+echo "configure:811: checking for $ac_word" >&5
if eval "test \"`echo '$''{'ac_cv_prog_CC'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
@@ -833,7 +837,7 @@
# Extract the first word of "cc", so it can be a program name with args.
set dummy cc; ac_word=$2
echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:837: checking for $ac_word" >&5
+echo "configure:841: checking for $ac_word" >&5
if eval "test \"`echo '$''{'ac_cv_prog_CC'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
@@ -884,7 +888,7 @@
# Extract the first word of "cl", so it can be a program name with args.
set dummy cl; ac_word=$2
echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:888: checking for $ac_word" >&5
+echo "configure:892: checking for $ac_word" >&5
if eval "test \"`echo '$''{'ac_cv_prog_CC'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
@@ -916,7 +920,7 @@
fi
echo $ac_n "checking whether the C compiler ($CC $CFLAGS $LDFLAGS) works""... $ac_c" 1>&6
-echo "configure:920: checking whether the C compiler ($CC $CFLAGS $LDFLAGS) works" >&5
+echo "configure:924: checking whether the C compiler ($CC $CFLAGS $LDFLAGS) works" >&5
ac_ext=c
# CFLAGS is not in ac_cpp because -g, -O, etc. are not valid cpp options.
@@ -927,12 +931,12 @@
cat > conftest.$ac_ext << EOF
-#line 931 "configure"
+#line 935 "configure"
#include "confdefs.h"
main(){return(0);}
EOF
-if { (eval echo configure:936: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+if { (eval echo configure:940: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
ac_cv_prog_cc_works=yes
# If we can't run a trivial program, we are probably using a cross compiler.
if (./conftest; exit) 2>/dev/null; then
@@ -958,12 +962,12 @@
{ echo "configure: error: installation or configuration problem: C compiler cannot create executables." 1>&2; exit 1; }
fi
echo $ac_n "checking whether the C compiler ($CC $CFLAGS $LDFLAGS) is a cross-compiler""... $ac_c" 1>&6
-echo "configure:962: checking whether the C compiler ($CC $CFLAGS $LDFLAGS) is a cross-compiler" >&5
+echo "configure:966: checking whether the C compiler ($CC $CFLAGS $LDFLAGS) is a cross-compiler" >&5
echo "$ac_t""$ac_cv_prog_cc_cross" 1>&6
cross_compiling=$ac_cv_prog_cc_cross
echo $ac_n "checking whether we are using GNU C""... $ac_c" 1>&6
-echo "configure:967: checking whether we are using GNU C" >&5
+echo "configure:971: checking whether we are using GNU C" >&5
if eval "test \"`echo '$''{'ac_cv_prog_gcc'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
@@ -972,7 +976,7 @@
yes;
#endif
EOF
-if { ac_try='${CC-cc} -E conftest.c'; { (eval echo configure:976: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }; } | egrep yes >/dev/null 2>&1; then
+if { ac_try='${CC-cc} -E conftest.c'; { (eval echo configure:980: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }; } | egrep yes >/dev/null 2>&1; then
ac_cv_prog_gcc=yes
else
ac_cv_prog_gcc=no
@@ -991,7 +995,7 @@
ac_save_CFLAGS="$CFLAGS"
CFLAGS=
echo $ac_n "checking whether ${CC-cc} accepts -g""... $ac_c" 1>&6
-echo "configure:995: checking whether ${CC-cc} accepts -g" >&5
+echo "configure:999: checking whether ${CC-cc} accepts -g" >&5
if eval "test \"`echo '$''{'ac_cv_prog_cc_g'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
@@ -1024,7 +1028,7 @@
echo $ac_n "checking whether ${MAKE-make} sets \${MAKE}""... $ac_c" 1>&6
-echo "configure:1028: checking whether ${MAKE-make} sets \${MAKE}" >&5
+echo "configure:1032: checking whether ${MAKE-make} sets \${MAKE}" >&5
set dummy ${MAKE-make}; ac_make=`echo "$2" | sed 'y%./+-%__p_%'`
if eval "test \"`echo '$''{'ac_cv_prog_make_${ac_make}_set'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
@@ -1069,21 +1073,19 @@
# sed with no file args requires a program.
test "$program_transform_name" = "" && program_transform_name="s,x,x,"
-for ac_prog in mawk gawk nawk awk
-do
-# Extract the first word of "$ac_prog", so it can be a program name with args.
-set dummy $ac_prog; ac_word=$2
+# Extract the first word of "bzip2", so it can be a program name with args.
+set dummy bzip2; ac_word=$2
echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:1078: checking for $ac_word" >&5
-if eval "test \"`echo '$''{'ac_cv_path_AWK'+set}'`\" = set"; then
+echo "configure:1080: checking for $ac_word" >&5
+if eval "test \"`echo '$''{'ac_cv_path_BZIP2'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
- case "$AWK" in
+ case "$BZIP2" in
/*)
- ac_cv_path_AWK="$AWK" # Let the user override the test with a path.
+ ac_cv_path_BZIP2="$BZIP2" # Let the user override the test with a path.
;;
?:/*)
- ac_cv_path_AWK="$AWK" # Let the user override the test with a dos path.
+ ac_cv_path_BZIP2="$BZIP2" # Let the user override the test with a dos path.
;;
*)
IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
@@ -1091,7 +1093,7 @@
for ac_dir in $ac_dummy; do
test -z "$ac_dir" && ac_dir=.
if test -f $ac_dir/$ac_word; then
- ac_cv_path_AWK="$ac_dir/$ac_word"
+ ac_cv_path_BZIP2="$ac_dir/$ac_word"
break
fi
done
@@ -1099,20 +1101,87 @@
;;
esac
fi
-AWK="$ac_cv_path_AWK"
-if test -n "$AWK"; then
- echo "$ac_t""$AWK" 1>&6
+BZIP2="$ac_cv_path_BZIP2"
+if test -n "$BZIP2"; then
+ echo "$ac_t""$BZIP2" 1>&6
else
echo "$ac_t""no" 1>&6
fi
-test -n "$AWK" && break
-done
+# Extract the first word of "bunzip2", so it can be a program name with args.
+set dummy bunzip2; ac_word=$2
+echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
+echo "configure:1115: checking for $ac_word" >&5
+if eval "test \"`echo '$''{'ac_cv_path_BUNZIP2'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ case "$BUNZIP2" in
+ /*)
+ ac_cv_path_BUNZIP2="$BUNZIP2" # Let the user override the test with a path.
+ ;;
+ ?:/*)
+ ac_cv_path_BUNZIP2="$BUNZIP2" # Let the user override the test with a dos path.
+ ;;
+ *)
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
+ ac_dummy="$PATH"
+ for ac_dir in $ac_dummy; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/$ac_word; then
+ ac_cv_path_BUNZIP2="$ac_dir/$ac_word"
+ break
+ fi
+ done
+ IFS="$ac_save_ifs"
+ ;;
+esac
+fi
+BUNZIP2="$ac_cv_path_BUNZIP2"
+if test -n "$BUNZIP2"; then
+ echo "$ac_t""$BUNZIP2" 1>&6
+else
+ echo "$ac_t""no" 1>&6
+fi
+
+# Extract the first word of "compress", so it can be a program name with args.
+set dummy compress; ac_word=$2
+echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
+echo "configure:1150: checking for $ac_word" >&5
+if eval "test \"`echo '$''{'ac_cv_path_COMPRESS'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ case "$COMPRESS" in
+ /*)
+ ac_cv_path_COMPRESS="$COMPRESS" # Let the user override the test with a path.
+ ;;
+ ?:/*)
+ ac_cv_path_COMPRESS="$COMPRESS" # Let the user override the test with a dos path.
+ ;;
+ *)
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
+ ac_dummy="$PATH"
+ for ac_dir in $ac_dummy; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/$ac_word; then
+ ac_cv_path_COMPRESS="$ac_dir/$ac_word"
+ break
+ fi
+ done
+ IFS="$ac_save_ifs"
+ ;;
+esac
+fi
+COMPRESS="$ac_cv_path_COMPRESS"
+if test -n "$COMPRESS"; then
+ echo "$ac_t""$COMPRESS" 1>&6
+else
+ echo "$ac_t""no" 1>&6
+fi
# Extract the first word of "cut", so it can be a program name with args.
set dummy cut; ac_word=$2
echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:1116: checking for $ac_word" >&5
+echo "configure:1185: checking for $ac_word" >&5
if eval "test \"`echo '$''{'ac_cv_path_CUT'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
@@ -1148,7 +1217,7 @@
# Extract the first word of "expr", so it can be a program name with args.
set dummy expr; ac_word=$2
echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:1152: checking for $ac_word" >&5
+echo "configure:1221: checking for $ac_word" >&5
if eval "test \"`echo '$''{'ac_cv_path_EXPR'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
@@ -1181,27 +1250,27 @@
echo "$ac_t""no" 1>&6
fi
-# Extract the first word of "se", so it can be a program name with args.
-set dummy se; ac_word=$2
+# Extract the first word of "gzip", so it can be a program name with args.
+set dummy gzip; ac_word=$2
echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:1188: checking for $ac_word" >&5
-if eval "test \"`echo '$''{'ac_cv_path_SE'+set}'`\" = set"; then
+echo "configure:1257: checking for $ac_word" >&5
+if eval "test \"`echo '$''{'ac_cv_path_GZIP'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
- case "$SE" in
+ case "$GZIP" in
/*)
- ac_cv_path_SE="$SE" # Let the user override the test with a path.
+ ac_cv_path_GZIP="$GZIP" # Let the user override the test with a path.
;;
?:/*)
- ac_cv_path_SE="$SE" # Let the user override the test with a dos path.
+ ac_cv_path_GZIP="$GZIP" # Let the user override the test with a dos path.
;;
*)
IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
- ac_dummy="/opt/RICHPse/bin:$PATH"
+ ac_dummy="$PATH"
for ac_dir in $ac_dummy; do
test -z "$ac_dir" && ac_dir=.
if test -f $ac_dir/$ac_word; then
- ac_cv_path_SE="$ac_dir/$ac_word"
+ ac_cv_path_GZIP="$ac_dir/$ac_word"
break
fi
done
@@ -1209,26 +1278,26 @@
;;
esac
fi
-SE="$ac_cv_path_SE"
-if test -n "$SE"; then
- echo "$ac_t""$SE" 1>&6
+GZIP="$ac_cv_path_GZIP"
+if test -n "$GZIP"; then
+ echo "$ac_t""$GZIP" 1>&6
else
echo "$ac_t""no" 1>&6
fi
-# Extract the first word of "uname", so it can be a program name with args.
-set dummy uname; ac_word=$2
+# Extract the first word of "gunzip", so it can be a program name with args.
+set dummy gunzip; ac_word=$2
echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:1223: checking for $ac_word" >&5
-if eval "test \"`echo '$''{'ac_cv_path_UNAME'+set}'`\" = set"; then
+echo "configure:1292: checking for $ac_word" >&5
+if eval "test \"`echo '$''{'ac_cv_path_GUNZIP'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
- case "$UNAME" in
+ case "$GUNZIP" in
/*)
- ac_cv_path_UNAME="$UNAME" # Let the user override the test with a path.
+ ac_cv_path_GUNZIP="$GUNZIP" # Let the user override the test with a path.
;;
?:/*)
- ac_cv_path_UNAME="$UNAME" # Let the user override the test with a dos path.
+ ac_cv_path_GUNZIP="$GUNZIP" # Let the user override the test with a dos path.
;;
*)
IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
@@ -1236,32 +1305,67 @@
for ac_dir in $ac_dummy; do
test -z "$ac_dir" && ac_dir=.
if test -f $ac_dir/$ac_word; then
- ac_cv_path_UNAME="$ac_dir/$ac_word"
+ ac_cv_path_GUNZIP="$ac_dir/$ac_word"
break
fi
done
IFS="$ac_save_ifs"
- test -z "$ac_cv_path_UNAME" && ac_cv_path_UNAME="uname"
;;
esac
fi
-UNAME="$ac_cv_path_UNAME"
-if test -n "$UNAME"; then
- echo "$ac_t""$UNAME" 1>&6
+GUNZIP="$ac_cv_path_GUNZIP"
+if test -n "$GUNZIP"; then
+ echo "$ac_t""$GUNZIP" 1>&6
else
echo "$ac_t""no" 1>&6
fi
+for ac_prog in mawk gawk nawk awk
+do
+# Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
+echo "configure:1329: checking for $ac_word" >&5
+if eval "test \"`echo '$''{'ac_cv_path_AWK'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ case "$AWK" in
+ /*)
+ ac_cv_path_AWK="$AWK" # Let the user override the test with a path.
+ ;;
+ ?:/*)
+ ac_cv_path_AWK="$AWK" # Let the user override the test with a dos path.
+ ;;
+ *)
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
+ ac_dummy="$PATH"
+ for ac_dir in $ac_dummy; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/$ac_word; then
+ ac_cv_path_AWK="$ac_dir/$ac_word"
+ break
+ fi
+ done
+ IFS="$ac_save_ifs"
+ ;;
+esac
+fi
+AWK="$ac_cv_path_AWK"
+if test -n "$AWK"; then
+ echo "$ac_t""$AWK" 1>&6
+else
+ echo "$ac_t""no" 1>&6
+fi
-# Include the file that defines BORP_PERL_RUN.
-
-
+test -n "$AWK" && break
+done
+# Include the file that defines BORP_PERL_RUN.
# Extract the first word of "perl", so it can be a program name with args.
set dummy perl; ac_word=$2
echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:1265: checking for $ac_word" >&5
+echo "configure:1369: checking for $ac_word" >&5
if eval "test \"`echo '$''{'ac_cv_path_PERL'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
@@ -1299,7 +1403,7 @@
else
echo $ac_n "checking if '$PERL' will run Perl scripts""... $ac_c" 1>&6
-echo "configure:1303: checking if '$PERL' will run Perl scripts" >&5
+echo "configure:1407: checking if '$PERL' will run Perl scripts" >&5
rm -f conftest.BZ
cat > conftest.BZ <<EOF
#!$PERL
@@ -1320,10 +1424,206 @@
PERL_HEAD="../config/$PERL_HEAD"
-# Check for necessary Perl modules.
+# Extract the first word of "uname", so it can be a program name with args.
+set dummy uname; ac_word=$2
+echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
+echo "configure:1431: checking for $ac_word" >&5
+if eval "test \"`echo '$''{'ac_cv_path_UNAME'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ case "$UNAME" in
+ /*)
+ ac_cv_path_UNAME="$UNAME" # Let the user override the test with a path.
+ ;;
+ ?:/*)
+ ac_cv_path_UNAME="$UNAME" # Let the user override the test with a dos path.
+ ;;
+ *)
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
+ ac_dummy="$PATH"
+ for ac_dir in $ac_dummy; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/$ac_word; then
+ ac_cv_path_UNAME="$ac_dir/$ac_word"
+ break
+ fi
+ done
+ IFS="$ac_save_ifs"
+ test -z "$ac_cv_path_UNAME" && ac_cv_path_UNAME="uname"
+ ;;
+esac
+fi
+UNAME="$ac_cv_path_UNAME"
+if test -n "$UNAME"; then
+ echo "$ac_t""$UNAME" 1>&6
+else
+ echo "$ac_t""no" 1>&6
+fi
+
+# Extract the first word of "uncompress", so it can be a program name with args.
+set dummy uncompress; ac_word=$2
+echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
+echo "configure:1467: checking for $ac_word" >&5
+if eval "test \"`echo '$''{'ac_cv_path_UNCOMPRESS'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ case "$UNCOMPRESS" in
+ /*)
+ ac_cv_path_UNCOMPRESS="$UNCOMPRESS" # Let the user override the test with a path.
+ ;;
+ ?:/*)
+ ac_cv_path_UNCOMPRESS="$UNCOMPRESS" # Let the user override the test with a dos path.
+ ;;
+ *)
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
+ ac_dummy="$PATH"
+ for ac_dir in $ac_dummy; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/$ac_word; then
+ ac_cv_path_UNCOMPRESS="$ac_dir/$ac_word"
+ break
+ fi
+ done
+ IFS="$ac_save_ifs"
+ ;;
+esac
+fi
+UNCOMPRESS="$ac_cv_path_UNCOMPRESS"
+if test -n "$UNCOMPRESS"; then
+ echo "$ac_t""$UNCOMPRESS" 1>&6
+else
+ echo "$ac_t""no" 1>&6
+fi
+
+
+# Determine the correct flags to compress files depending upon the compress
+# programs available.
+COMPRESSOR=
+UNCOMPRESSOR_PIPE=
+if test -n "$BZIP2" -a -n "$BUNZIP2"; then
+ COMPRESSOR="$BZIP2 -9"
+ UNCOMPRESSOR_PIPE="$BUNZIP2 -c"
+elif test -n "$GZIP" -a -n "$GUNZIP"; then
+ COMPRESSOR="$GZIP -9"
+ UNCOMPRESSOR_PIPE="$GUNZIP -c"
+elif test -n "$COMPRESS" -a -n "$UNCOMPRESS"; then
+ COMPRESSOR="$COMPRESS"
+ UNCOMPRESSOR_PIPE="$UNCOMPRESS -c"
+fi
+
+
+
+# Now we check for those portions of Orca that should be built and set
+# up for installation. The first step is to check for operating system
+# specific modules. For Solaris hosts, orcallator.se is built and
+# installed. This requires the additional building of a librrd.so
+# installed in libdir. Then we check for the proper Perl modules.
+echo $ac_n "checking for solaris host for orcallator install""... $ac_c" 1>&6
+echo "configure:1523: checking for solaris host for orcallator install" >&5
+case "$target" in
+ *-solaris*)
+ BUILD_ORCALLATOR=yes
+ ORCALLATOR_SUBDIR=orcallator
+ # Add --enable-shared to the configure options for RRDtool if it is
+ # not already declared.
+ expr "$CONFIGURE_COMMAND_LINE" : "--enable-shared" >/dev/null 2>&1 || CONFIGURE_COMMAND_LINE="$CONFIGURE_COMMAND_LINE --enable-shared"
+
+ INSTALL_LIB_RRDTOOL=install_lib_rrdtool
+ MAKE_RRDTOOL=make_rrdtool
+ TEST_RRDTOOL=test_rrdtool
+ INSTALL_PERL_RRDTOOL=
+ CLEAN_RRDTOOL=clean_rrdtool
+ DISTCLEAN_RRDTOOL=distclean_rrdtool
+ ;;
+ *)
+ INSTALL_LIB_RRDTOOL=
+ BUILD_ORCALLATOR=no
+ ORCALLATOR_SUBDIR=
+ ;;
+esac
+
+
+
+echo "$ac_t""$BUILD_ORCALLATOR" 1>&6
+if test "$BUILD_ORCALLATOR" = "yes"; then
+ # Extract the first word of "se", so it can be a program name with args.
+set dummy se; ac_word=$2
+echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
+echo "configure:1553: checking for $ac_word" >&5
+if eval "test \"`echo '$''{'ac_cv_path_SE'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ case "$SE" in
+ /*)
+ ac_cv_path_SE="$SE" # Let the user override the test with a path.
+ ;;
+ ?:/*)
+ ac_cv_path_SE="$SE" # Let the user override the test with a dos path.
+ ;;
+ *)
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
+ ac_dummy="$PATH:/opt/RICHPse/bin"
+ for ac_dir in $ac_dummy; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/$ac_word; then
+ ac_cv_path_SE="$ac_dir/$ac_word"
+ break
+ fi
+ done
+ IFS="$ac_save_ifs"
+ ;;
+esac
+fi
+SE="$ac_cv_path_SE"
+if test -n "$SE"; then
+ echo "$ac_t""$SE" 1>&6
+else
+ echo "$ac_t""no" 1>&6
+fi
+
+fi
+
+if test "$borp_cv_perl_compress_zlib" = no; then
+ MAKE_COMPRESS_ZLIB=make_compress_zlib
+ TEST_COMPRESS_ZLIB=test_compress_zlib
+ INSTALL_PERL_COMPRESS_ZLIB=install_perl_compress_zlib
+ CLEAN_COMPRESS_ZLIB=clean_compress_zlib
+ DISTCLEAN_COMPRESS_ZLIB=distclean_compress_zlib
+fi
+
+
+
+
+
+
+
+ echo $ac_n "checking if Perl module Data::Dumper version 2.101 is installed""... $ac_c" 1>&6
+echo "configure:1602: checking if Perl module Data::Dumper version 2.101 is installed" >&5
+ if $PERL ./config/check_for_perl_mod Data::Dumper 2.101; then
+ borp_cv_perl_data_dumper=yes
+
+ else
+ borp_cv_perl_data_dumper=no
+
+ fi
+ echo "$ac_t""$borp_cv_perl_data_dumper" 1>&6
+
+if test "$borp_cv_perl_data_dumper" = no; then
+ MAKE_DATA_DUMPER=make_data_dumper
+ TEST_DATA_DUMPER=test_data_dumper
+ INSTALL_PERL_DATA_DUMPER=install_perl_data_dumper
+ CLEAN_DATA_DUMPER=clean_data_dumper
+ DISTCLEAN_DATA_DUMPER=distclean_data_dumper
+fi
+
+
+
+
+
+
echo $ac_n "checking if Perl module Digest::MD5 version 2.00 is installed""... $ac_c" 1>&6
-echo "configure:1327: checking if Perl module Digest::MD5 version 2.00 is installed" >&5
+echo "configure:1627: checking if Perl module Digest::MD5 version 2.00 is installed" >&5
if $PERL ./config/check_for_perl_mod Digest::MD5 2.00; then
borp_cv_perl_digest_md5=yes
@@ -1336,7 +1636,7 @@
if test "$borp_cv_perl_digest_md5" = no; then
MAKE_DIGEST_MD5=make_digest_md5
TEST_DIGEST_MD5=test_digest_md5
- INSTALL_DIGEST_MD5=install_digest_md5
+ INSTALL_PERL_DIGEST_MD5=install_perl_digest_md5
CLEAN_DIGEST_MD5=clean_digest_md5
DISTCLEAN_DIGEST_MD5=distclean_digest_md5
fi
@@ -1347,9 +1647,9 @@
- echo $ac_n "checking if Perl module Math::Interpolate version 1.00 is installed""... $ac_c" 1>&6
-echo "configure:1352: checking if Perl module Math::Interpolate version 1.00 is installed" >&5
- if $PERL ./config/check_for_perl_mod Math::Interpolate 1.00; then
+ echo $ac_n "checking if Perl module Math::Interpolate version 1.04 is installed""... $ac_c" 1>&6
+echo "configure:1652: checking if Perl module Math::Interpolate version 1.04 is installed" >&5
+ if $PERL ./config/check_for_perl_mod Math::Interpolate 1.04; then
borp_cv_perl_math_interpolate=yes
else
@@ -1361,7 +1661,7 @@
if test "$borp_cv_perl_math_interpolate" = no; then
MAKE_MATH_INTERPOLATE=make_math_interpolate
TEST_MATH_INTERPOLATE=test_math_interpolate
- INSTALL_MATH_INTERPOLATE=install_math_interpolate
+ INSTALL_PERL_MATH_INTERPOLATE=install_perl_math_interpolate
CLEAN_MATH_INTERPOLATE=clean_math_interpolate
DISTCLEAN_MATH_INTERPOLATE=distclean_math_interpolate
fi
@@ -1372,9 +1672,9 @@
- echo $ac_n "checking if Perl module RRDs version 0.99029 is installed""... $ac_c" 1>&6
-echo "configure:1377: checking if Perl module RRDs version 0.99029 is installed" >&5
- if $PERL ./config/check_for_perl_mod RRDs 0.99029; then
+ echo $ac_n "checking if Perl module RRDs version 1.000072 is installed""... $ac_c" 1>&6
+echo "configure:1677: checking if Perl module RRDs version 1.000072 is installed" >&5
+ if $PERL ./config/check_for_perl_mod RRDs 1.000072; then
borp_cv_perl_rdds=yes
else
@@ -1386,7 +1686,7 @@
if test "$borp_cv_perl_rdds" = no; then
MAKE_RRDTOOL=make_rrdtool
TEST_RRDTOOL=test_rrdtool
- INSTALL_RRDTOOL=install_rrdtool
+ INSTALL_PERL_RRDTOOL=install_perl_rrdtool
CLEAN_RRDTOOL=clean_rrdtool
DISTCLEAN_RRDTOOL=distclean_rrdtool
fi
@@ -1398,7 +1698,7 @@
echo $ac_n "checking if Perl module Storable version 0.603 is installed""... $ac_c" 1>&6
-echo "configure:1402: checking if Perl module Storable version 0.603 is installed" >&5
+echo "configure:1702: checking if Perl module Storable version 0.603 is installed" >&5
if $PERL ./config/check_for_perl_mod Storable 0.603; then
borp_cv_perl_storable=yes
@@ -1411,7 +1711,7 @@
if test "$borp_cv_perl_storable" = no; then
MAKE_STORABLE=make_storable
TEST_STORABLE=test_storable
- INSTALL_STORABLE=install_storable
+ INSTALL_PERL_STORABLE=install_perl_storable
CLEAN_STORABLE=clean_storable
DISTCLEAN_STORABLE=distclean_storable
fi
@@ -1421,7 +1721,6 @@
-
# Define the INSTALL and MKDIR variables to point to the scripts in
# the config directory.
INSTALL="../config/install-sh -c"
@@ -1431,8 +1730,17 @@
#--------------------------------------------------------------------
# Generate the Makefiles and shell scripts with the
-# variable substitution.
+# variable substitutions.
#--------------------------------------------------------------------
+if test "$BUILD_ORCALLATOR" = "yes"; then
+ ORCALLATOR_OUTPUT="orcallator/orcallator.cfg
+ orcallator/orcallator_running.pl
+ orcallator/restart_orcallator.sh
+ orcallator/start_orcallator.sh
+ orcallator/stop_orcallator.sh
+ orcallator/Makefile"
+fi
+
trap '' 1 2 15
cat > confcache <<\EOF
# This file is a shell script that caches the results of configure
@@ -1547,14 +1855,11 @@
trap 'rm -fr `echo "config/PerlHead1
config/PerlHead2
+ lib/Makefile
packages/Makefile
- src/orcallator_running.pl
- src/restart_orcallator.sh
- src/start_orcallator.sh
- src/stop_orcallator.sh
+ src/orca.pl
src/Makefile
- lib/orcallator.cfg
- lib/Makefile
+ $ORCALLATOR_OUTPUT
docs/Makefile
Makefile" | sed "s/:[^ ]*//g"` conftest*; exit 1' 1 2 15
EOF
@@ -1604,6 +1909,8 @@
s%@build_vendor@%$build_vendor%g
s%@build_os@%$build_os%g
s%@CONFIGURE_COMMAND_LINE@%$CONFIGURE_COMMAND_LINE%g
+s%@COMPRESS_ZLIB_DIR@%$COMPRESS_ZLIB_DIR%g
+s%@DATA_DUMPER_DIR@%$DATA_DUMPER_DIR%g
s%@DIGEST_MD5_DIR@%$DIGEST_MD5_DIR%g
s%@MATH_INTERPOLATE_DIR@%$MATH_INTERPOLATE_DIR%g
s%@RRDTOOL_DIR@%$RRDTOOL_DIR%g
@@ -1611,35 +1918,55 @@
s%@RRD_DIR@%$RRD_DIR%g
s%@HTML_DIR@%$HTML_DIR%g
s%@ORCALLATOR_DIR@%$ORCALLATOR_DIR%g
-s%@WATCH_HTTPD@%$WATCH_HTTPD%g
+s%@WATCH_WEB@%$WATCH_WEB%g
s%@WEB_LOG@%$WEB_LOG%g
s%@CC@%$CC%g
s%@SET_MAKE@%$SET_MAKE%g
-s%@AWK@%$AWK%g
+s%@BZIP2@%$BZIP2%g
+s%@BUNZIP2@%$BUNZIP2%g
+s%@COMPRESS@%$COMPRESS%g
s%@CUT@%$CUT%g
s%@EXPR@%$EXPR%g
-s%@SE@%$SE%g
-s%@UNAME@%$UNAME%g
+s%@GZIP@%$GZIP%g
+s%@GUNZIP@%$GUNZIP%g
+s%@AWK@%$AWK%g
s%@PERL@%$PERL%g
s%@PERL_HEAD@%$PERL_HEAD%g
+s%@UNAME@%$UNAME%g
+s%@UNCOMPRESS@%$UNCOMPRESS%g
+s%@COMPRESSOR@%$COMPRESSOR%g
+s%@UNCOMPRESSOR_PIPE@%$UNCOMPRESSOR_PIPE%g
+s%@ORCALLATOR_SUBDIR@%$ORCALLATOR_SUBDIR%g
+s%@INSTALL_LIB_RRDTOOL@%$INSTALL_LIB_RRDTOOL%g
+s%@SE@%$SE%g
+s%@MAKE_COMPRESS_ZLIB@%$MAKE_COMPRESS_ZLIB%g
+s%@TEST_COMPRESS_ZLIB@%$TEST_COMPRESS_ZLIB%g
+s%@INSTALL_PERL_COMPRESS_ZLIB@%$INSTALL_PERL_COMPRESS_ZLIB%g
+s%@CLEAN_COMPRESS_ZLIB@%$CLEAN_COMPRESS_ZLIB%g
+s%@DISTCLEAN_COMPRESS_ZLIB@%$DISTCLEAN_COMPRESS_ZLIB%g
+s%@MAKE_DATA_DUMPER@%$MAKE_DATA_DUMPER%g
+s%@TEST_DATA_DUMPER@%$TEST_DATA_DUMPER%g
+s%@INSTALL_PERL_DATA_DUMPER@%$INSTALL_PERL_DATA_DUMPER%g
+s%@CLEAN_DATA_DUMPER@%$CLEAN_DATA_DUMPER%g
+s%@DISTCLEAN_DATA_DUMPER@%$DISTCLEAN_DATA_DUMPER%g
s%@MAKE_DIGEST_MD5@%$MAKE_DIGEST_MD5%g
s%@TEST_DIGEST_MD5@%$TEST_DIGEST_MD5%g
-s%@INSTALL_DIGEST_MD5@%$INSTALL_DIGEST_MD5%g
+s%@INSTALL_PERL_DIGEST_MD5@%$INSTALL_PERL_DIGEST_MD5%g
s%@CLEAN_DIGEST_MD5@%$CLEAN_DIGEST_MD5%g
s%@DISTCLEAN_DIGEST_MD5@%$DISTCLEAN_DIGEST_MD5%g
s%@MAKE_MATH_INTERPOLATE@%$MAKE_MATH_INTERPOLATE%g
s%@TEST_MATH_INTERPOLATE@%$TEST_MATH_INTERPOLATE%g
-s%@INSTALL_MATH_INTERPOLATE@%$INSTALL_MATH_INTERPOLATE%g
+s%@INSTALL_PERL_MATH_INTERPOLATE@%$INSTALL_PERL_MATH_INTERPOLATE%g
s%@CLEAN_MATH_INTERPOLATE@%$CLEAN_MATH_INTERPOLATE%g
s%@DISTCLEAN_MATH_INTERPOLATE@%$DISTCLEAN_MATH_INTERPOLATE%g
s%@MAKE_RRDTOOL@%$MAKE_RRDTOOL%g
s%@TEST_RRDTOOL@%$TEST_RRDTOOL%g
-s%@INSTALL_RRDTOOL@%$INSTALL_RRDTOOL%g
+s%@INSTALL_PERL_RRDTOOL@%$INSTALL_PERL_RRDTOOL%g
s%@CLEAN_RRDTOOL@%$CLEAN_RRDTOOL%g
s%@DISTCLEAN_RRDTOOL@%$DISTCLEAN_RRDTOOL%g
s%@MAKE_STORABLE@%$MAKE_STORABLE%g
s%@TEST_STORABLE@%$TEST_STORABLE%g
-s%@INSTALL_STORABLE@%$INSTALL_STORABLE%g
+s%@INSTALL_PERL_STORABLE@%$INSTALL_PERL_STORABLE%g
s%@CLEAN_STORABLE@%$CLEAN_STORABLE%g
s%@DISTCLEAN_STORABLE@%$DISTCLEAN_STORABLE%g
s%@INSTALL@%$INSTALL%g
@@ -1687,14 +2014,11 @@
CONFIG_FILES=\${CONFIG_FILES-"config/PerlHead1
config/PerlHead2
+ lib/Makefile
packages/Makefile
- src/orcallator_running.pl
- src/restart_orcallator.sh
- src/start_orcallator.sh
- src/stop_orcallator.sh
+ src/orca.pl
src/Makefile
- lib/orcallator.cfg
- lib/Makefile
+ $ORCALLATOR_OUTPUT
docs/Makefile
Makefile"}
EOF
@@ -1763,14 +2087,13 @@
test "$no_create" = yes || ${CONFIG_SHELL-/bin/sh} $CONFIG_STATUS || exit 1
-if test "$borp_cv_perl_rdds" != "yes"; then
- echo ""
- echo "Running configure in packages/$RRDTOOL_DIR to create RRDtool and RRDs.pm."
- echo ""
- echo "(cd packages/$RRDTOOL_DIR; ./configure $CONFIGURE_COMMAND_LINE --cache-file=../../config.cache)"
- echo ""
- (cd packages/$RRDTOOL_DIR; ./configure $CONFIGURE_COMMAND_LINE --cache-file=../../config.cache)
-fi
+command="(cd packages/$RRDTOOL_DIR; ./configure $CONFIGURE_COMMAND_LINE --cache-file=../../config.cache)"
+echo ""
+echo "Running configure in packages/$RRDTOOL_DIR to create RRDtool and RRDs.pm."
+echo ""
+echo $command
+echo ""
+eval $command
if test -z "$WEB_LOG"; then
echo "configure: warning: *** Unless you use a --with-*-log option you will not gather WWW log data." 1>&2
Modified: trunk/orca/Makefile.in
==============================================================================
--- trunk/orca/Makefile.in (original)
+++ trunk/orca/Makefile.in Sat Jul 13 19:22:23 2002
@@ -1,47 +1,44 @@
@SET_MAKE@
-SUBDIRS = packages lib src docs
+SUBDIRS = packages lib src @ORCALLATOR_SUBDIR@ docs
PREFIX = @prefix@
+MAKE_RRDTOOL = @MAKE_RRDTOOL@
ORCALLATOR_DIR = @ORCALLATOR_DIR@
RRD_DIR = @RRD_DIR@
CFLAGS = @CFLAGS@
-all: configure Makefile
- @for dir in $(SUBDIRS); do \
- echo "cd $$dir && $(MAKE) CFLAGS=$(CFLAGS)"; \
- (cd $$dir && $(MAKE) CFLAGS="$(CFLAGS)"); \
+all: configure config/PerlHead1 config/PerlHead2 Makefile
+ @for dir in $(SUBDIRS); do \
+ echo "cd $$dir && $(MAKE) CFLAGS=$(CFLAGS)"; \
+ (cd $$dir && $(MAKE) CFLAGS="$(CFLAGS)"); \
done
-test:
- (cd packages; $(MAKE) CFLAGS="$(CFLAGS)" test)
+test: configure Makefile
-migrate:
- (cd src; $(MAKE) migrate)
+upgrade:
+ cd src && $(MAKE) upgrade_installation
-install:
+install: $(INSTALL_RRDTOOL)
./config/mkinstalldirs $(ORCALLATOR_DIR)
- ./config/mkinstalldirs $(RRD_DIR)
+ ./config/mkinstalldirs $(RRD_DIR)/orcallator
@for dir in $(SUBDIRS); do \
echo "cd $$dir && $(MAKE) install"; \
(cd $$dir && $(MAKE) install); \
done
-modules:
- (cd packages && $(MAKE) CFLAGS="$(CFLAGS)" modules)
-
test_modules:
- (cd packages && $(MAKE) CFLAGS="$(CFLAGS)" test_modules)
+ cd packages && $(MAKE) CFLAGS="$(CFLAGS)" test_modules
install_modules:
- (cd packages && $(MAKE) CFLAGS="$(CFLAGS)" install_modules)
+ cd packages && $(MAKE) CFLAGS="$(CFLAGS)" install_modules
-clean:
+clean: $(CLEAN_RRDTOOL)
@for dir in $(SUBDIRS); do \
echo "cd $$dir && $(MAKE) clean"; \
(cd $$dir && $(MAKE) clean); \
done
-distclean:
+distclean: $(DISTCLEAN_RRDTOOL)
@for dir in $(SUBDIRS); do \
echo "cd $$dir && $(MAKE) distclean"; \
(cd $$dir && $(MAKE) distclean); \
@@ -49,9 +46,19 @@
$(RM) config/PerlHead1 config/PerlHead2
$(RM) config.cache config.log config.status Makefile
+to-autoconf:
+ aclocal -I config --output=config/aclocal.m4
+ autoconf --localdir=config
+
configure: configure.in
- autoconf
+ $(MAKE) to-autoconf
./config.status
Makefile: Makefile.in
- ./config.status
+ CONFIG_FILES=Makefile ./config.status
+
+config/PerlHead1: config/PerlHead1.in
+ CONFIG_FILES=config/PerlHead1 ./config.status
+
+config/PerlHead2: config/PerlHead2.in
+ CONFIG_FILES=config/PerlHead2 ./config.status
Added: trunk/orca/orcallator/Makefile.in
==============================================================================
--- trunk/orca/orcallator/Makefile.in (original)
+++ trunk/orca/orcallator/Makefile.in Sat Jul 13 19:22:23 2002
@@ -0,0 +1,65 @@
+@SET_MAKE@
+
+prefix = @prefix@
+exec_prefix = @exec_prefix@
+bindir = @bindir@
+libdir = @libdir@
+MKDIR = @MKDIR@
+INSTALL = @INSTALL@
+PERL_HEAD = @PERL_HEAD@
+ORCALLATOR_DIR = @ORCALLATOR_DIR@
+RRD_DIR = @RRD_DIR@
+RRDTOOL_DIR = @RRDTOOL_DIR@
+PERL_SCRIPTS = orcallator_column orcallator_running
+SHELL_SCRIPTS = restart_orcallator stop_orcallator start_orcallator
+TARGETS = $(PERL_SCRIPTS) $(SHELL_SCRIPTS)
+
+all: $(TARGETS) orcallator.cfg
+
+install: all
+ $(MKDIR) $(bindir)
+ $(MKDIR) $(libdir)
+ @for file in $(TARGETS); do \
+ echo $(INSTALL) $$file $(bindir); \
+ $(INSTALL) $$file $(bindir); \
+ done
+ if test -r $(libdir)/orcallator.cfg; then \
+ cp -p $(libdir)/orcallator.cfg $(libdir)/orcallator.cfg.`date +%Y-%m-%d-%H:%M:%S`; \
+ fi
+ $(INSTALL) -m 0644 orcallator.cfg $(libdir)
+ $(INSTALL) -m 0644 orcallator.se $(libdir)
+
+clean:
+ $(RM) $(TARGETS)
+
+distclean: clean
+ $(RM) *.sh orcallator.cfg orcallator_running.pl Makefile
+
+.SUFFIXES: .pl .sh
+
+.pl: $(PERL_HEAD)
+ cat $(PERL_HEAD) $< > $@
+ chmod 0755 $@
+
+.sh:
+ cp $< $@
+ chmod 0755 $@
+
+Makefile: Makefile.in
+ cd .. && CONFIG_FILES=orcallator/Makefile ./config.status
+ $(MAKE)
+
+orcallator.cfg: orcallator.cfg.in
+ cd .. && CONFIG_FILES=orcallator/orcallator.cfg ./config.status
+
+orcallator_running.pl: orcallator_running.pl.in
+ cd .. && CONFIG_FILES=orcallator/orcallator_running.pl ./config.status
+
+restart_orcallator.sh: restart_orcallator.sh.in
+ cd .. && CONFIG_FILES=orcallator/restart_orcallator.sh ./config.status
+
+start_orcallator.sh: start_orcallator.sh.in
+ cd .. && CONFIG_FILES=orcallator/start_orcallator.sh ./config.status
+
+stop_orcallator.sh: stop_orcallator.sh.in
+ cd .. && CONFIG_FILES=orcallator/stop_orcallator.sh ./config.status
Modified: trunk/orca/orcallator/orcallator.cfg.in
==============================================================================
--- trunk/orca/orcallator/orcallator.cfg.in (original)
+++ trunk/orca/orcallator/orcallator.cfg.in Sat Jul 13 19:22:23 2002
@@ -1,12 +1,12 @@
# Orca configuration file for orcallator files.
-# base_dir is prepended to the paths data_dir, find_files, html_dir,
+# base_dir is prepended to the paths find_files, html_dir, rrd_dir,
# and state_file only if the path does not match the regular
# expression ^\\?\.{0,2}/, which matches /, ./, ../, and \./.
base_dir @RRD_DIR@/orcallator
-# data_dir specifies the location of the generated RRD data files.
-data_dir .
+# rrd_dir specifies the location of the generated RRD data files.
+rrd_dir .
# state_file specifies the location of the state file that remembers
# the modification time of each source data file.
@@ -17,7 +17,7 @@
# By default create .meta tag files for all GIFs so that the web
# browser will automatically reload them.
-expire_gifs 1
+expire_images 1
# Find files at the following times:
# 0:10 to pick up new orcallator files for the new day.
@@ -36,7 +36,7 @@
# This defines where the find the source data files and the format of those
# files.
-files orcallator {
+group orcallator {
find_files @ORCALLATOR_DIR@/(.*)/(?:(?:orcallator)|(?:percol))-\d{4}-\d{2}-\d{2}
column_description first_line
date_source column_name timestamp
@@ -45,25 +45,13 @@
reopen 1
}
-html_top_title GeoCities Host Status
+html_top_title Yahoo!/GeoCities Host Status
html_page_header
- <table border=0 cellspacing=0 cellpadding=0 width="100%">
- <tr>
- <td><a href="http://www.geocities.com">
- <img border=0 alt="GeoCities"
- src="http://pic.geocities.com/images/main/hp/logo_top.gif"
- width=126 height=58></a>
- </td>
- </tr>
- <tr>
- <td><a href="http://www.geocities.com">
- <img border=0 alt="GeoCities"
- src="http://pic.geocities.com/images/main/hp/tagline.gif"
- width=124 height=36></a>
- </td>
- </tr>
- </table>
+ <a href="http://geocities.yahoo.com/home/">
+ <img border=0 alt="Yahoo!/GeoCities"
+ src="http://a372.g.akamaitech.net/7/372/27/5fd49246b3dc72/us.yimg.com/i/geo/ygeo.gif"
+ width=305 height=36></a>
<spacer type=vertical size=4>
html_page_footer
@@ -73,7 +61,7 @@
</font>
plot {
-title %g Average # Processes in Run Queue
+title %g Average # Processes in Run Queue (Load Average)
source orcallator
data 1runq
data 5runq
@@ -84,54 +72,75 @@
y_legend # Processes
data_min 0
data_max 100
+href http://www.geocities.com/~bzking/docs/orcallator.html#processes_in_run_queue
}
plot {
-title %g System Load
+title %g CPU Usage
source orcallator
-data 1load
-data 5load
-data 15load
-legend 1 Minute Average
-legend 5 Minute Average
-legend 15 Minute Average
-y_legend Load
+data usr%
+data sys%
+data 100 - usr% - sys%
+line_type area
+line_type stack
+line_type stack
+legend User
+legend System
+legend Idle
+y_legend Percent
data_min 0
-data_max 200
+data_max 100
+plot_min 0
+plot_max 100
+rigid_min_max 1
+href http://www.geocities.com/~bzking/docs/orcallator.html#cpu_usage
+}
+
+plot {
+title %g New Process Spawn Rate
+source orcallator
+data #proc/s
+data #proc/p5s
+line_type area
+line_type line1
+legend 5 min average procs/s
+legend Peak 5 second procs/s
+y_legend New processes/second
+data_min 0
+data_max 100000
+optional
+href http://www.geocities.com/~bzking/docs/orcallator.html#new_process_spawn_rate
}
plot {
-title %g Number of System & Httpd Processes
+title %g Number of System & Web Server Processes
source orcallator
data #proc
data #httpds
line_type line1
line_type area
legend System total
-legend Number httpds
+legend Number web servers
y_legend # Processes
data_min 0
data_max 10000
+color 0000ff
+color 00ff00
+optional
+href http://www.geocities.com/~bzking/docs/orcallator.html#number_system_processes
}
plot {
-title %g CPU Usage
+title %g Number of Web Server Processes
source orcallator
-data usr%
-data sys%
-data 100 - usr% - sys%
-legend User
-legend System
-legend Idle
+data #httpds
line_type area
-line_type stack
-line_type stack
-y_legend Percent
+legend Number Web Server Processes
+y_legend # Processes
data_min 0
-data_max 100
-plot_min 0
-plot_max 100
-rigid_min_max 1
+data_max 10000
+optional
+href http://www.geocities.com/~bzking/docs/orcallator.html#number_web_server_processes
}
plot {
@@ -139,11 +148,16 @@
source orcallator
data httpop/s
data http/p5s
+line_type area
+line_type line1
legend 5 min average hits/s
legend Peak 5 second hits/s
y_legend Hits/second
data_min 0
+color 00ff00
+color 0000ff
optional
+href http://www.geocities.com/~bzking/docs/orcallator.html#web_server_hit_rate
}
plot {
@@ -170,180 +184,212 @@
plot_min 0
plot_max 100
rigid_min_max 1
+optional
+href http://www.geocities.com/~bzking/docs/orcallator.html#web_server_file_size
}
plot {
title %g Web Server Data Transfer Rate
source orcallator
data httpb/s
+line_type area
legend Bytes/s
y_legend Bytes/s
data_min 0
+optional
+href http://www.geocities.com/~bzking/docs/orcallator.html#web_server_data_transfer_rate
}
plot {
title %g Web Server HTTP Error Rate
source orcallator
data htErr/s
+line_type area
legend HTTP Errors/s
y_legend Errors/s
data_min 0
+optional
+href http://www.geocities.com/~bzking/docs/orcallator.html#web_server_error_rate
}
plot {
-title %g Bits Per Second: be0
+title %g Interface Bits Per Second: be0
source orcallator
data 1024 * 8 * be0InKB/s
data 1024 * 8 * be0OuKB/s
line_type area
+line_type line1
legend Input
legend Output
-y_legend bits/s
+y_legend Bits/s
data_min 0
data_max 100000000
optional
+href http://www.geocities.com/~bzking/docs/orcallator.html#interface_bits_per_second
}
plot {
-title %g Bits Per Second: elxl0
+title %g Interface Bits Per Second: elxl0
source orcallator
data 1024 * 8 * elxl0InKB/s
data 1024 * 8 * elxl0OuKB/s
line_type area
+line_type line1
legend Input
legend Output
-y_legend bits/s
+y_legend Bits/s
data_min 0
data_max 100000000
optional
+href http://www.geocities.com/~bzking/docs/orcallator.html#interface_bits_per_second
}
plot {
-title %g Bits Per Second: hme0
+title %g Interface Bits Per Second: hme0
source orcallator
data 1024 * 8 * hme0InKB/s
data 1024 * 8 * hme0OuKB/s
line_type area
+line_type line1
legend Input
legend Output
-y_legend bits/s
+y_legend Bits/s
data_min 0
data_max 100000000
optional
+href http://www.geocities.com/~bzking/docs/orcallator.html#interface_bits_per_second
}
plot {
-title %g Bits Per Second: hme1
+title %g Interface Bits Per Second: hme1
source orcallator
data 1024 * 8 * hme1InKB/s
data 1024 * 8 * hme1OuKB/s
line_type area
+line_type line1
legend Input
legend Output
-y_legend bits/s
+y_legend Bits/s
data_min 0
data_max 100000000
optional
+href http://www.geocities.com/~bzking/docs/orcallator.html#interface_bits_per_second
}
plot {
-title %g Bits Per Second: hme2
+title %g Interface Bits Per Second: hme2
source orcallator
data 1024 * 8 * hme2InKB/s
data 1024 * 8 * hme2OuKB/s
line_type area
+line_type line1
legend Input
legend Output
-y_legend bits/s
+y_legend Bits/s
data_min 0
data_max 100000000
optional
+href http://www.geocities.com/~bzking/docs/orcallator.html#interface_bits_per_second
}
plot {
-title %g Bits Per Second: le0
+title %g Interface Bits Per Second: le0
source orcallator
data 1024 * 8 * le0InKB/s
data 1024 * 8 * le0OuKB/s
line_type area
+line_type line1
legend Input
legend Output
-y_legend bits/s
+y_legend Bits/s
data_min 0
data_max 10000000
optional
+href http://www.geocities.com/~bzking/docs/orcallator.html#interface_bits_per_second
}
plot {
-title %g Bits Per Second: le1
+title %g Interface Bits Per Second: le1
source orcallator
data 1024 * 8 * le1InKB/s
data 1024 * 8 * le1OuKB/s
line_type area
+line_type line1
legend Input
legend Output
-y_legend bits/s
+y_legend Bits/s
data_min 0
data_max 10000000
optional
+href http://www.geocities.com/~bzking/docs/orcallator.html#interface_bits_per_second
}
plot {
-title %g Packets Per Second: $1
+title %g Interface Packets Per Second: $1
source orcallator
data (.*\d)Ipkt/s
data $1Opkt/s
line_type area
+line_type line1
legend Input
legend Output
y_legend Packets/s
data_min 0
data_max 100000
flush_regexps 1
+href http://www.geocities.com/~bzking/docs/orcallator.html#interface_packets_per_second
}
plot {
-title %g Errors Per Second: $1
+title %g Interface Errors Per Second: $1
source orcallator
data (.*\d)IErr/s
data $1OErr/s
line_type area
+line_type line1
legend Input
legend Output
y_legend Errors/s
data_min 0
flush_regexps 1
+href http://www.geocities.com/~bzking/docs/orcallator.html#interface_errors_per_second
}
plot {
-title %g Ethernet Nocanput Rate
+title %g Interface Nocanput Rate
source orcallator
data (.*\d)NoCP/s
+line_type area
legend $1
y_legend Nocanput/s
data_min 0
flush_regexps 1
+href http://www.geocities.com/~bzking/docs/orcallator.html#interface_nocanput_rate
}
plot {
-title %g Ethernet Deferred Packet Rate
+title %g Interface Deferred Packet Rate
source orcallator
data (.*\d)Defr/s
+line_type area
legend $1
y_legend Defers/s
data_min 0
flush_regexps 1
+href http://www.geocities.com/~bzking/docs/orcallator.html#interface_deferred_packet_rate
}
plot {
-title %g Ethernet Collisions
+title %g Interface Collisions
source orcallator
data (.*\d)Coll%
+line_type area
legend $1
y_legend Percent
data_min 0
data_max 200
flush_regexps 1
+href http://www.geocities.com/~bzking/docs/orcallator.html#interface_collisions
}
plot {
@@ -352,11 +398,13 @@
data 1024 * 8 * tcp_InKB/s
data 1024 * 8 * tcp_OuKB/s
line_type area
+line_type line1
legend Input
legend Output
-y_legend bits/s
+y_legend Bits/s
data_min 0
data_max 1000000000
+href http://www.geocities.com/~bzking/docs/orcallator.html#TCP_bits_per_second
}
plot {
@@ -365,11 +413,13 @@
data tcp_Iseg/s
data tcp_Oseg/s
line_type area
+line_type line1
legend Input
legend Output
y_legend Segments/s
data_min 0
data_max 20000
+href http://www.geocities.com/~bzking/docs/orcallator.html#TCP_segments_per_second
}
plot {
@@ -377,11 +427,14 @@
source orcallator
data tcp_Ret%
data tcp_Dup%
+line_type area
+line_type line1
legend Retransmission
legend Duplicate Received
y_legend Percent
data_min 0
data_max 200
+href http://www.geocities.com/~bzking/docs/orcallator.html#TCP_retransmission_duplicate_received_percentage
}
plot {
@@ -389,39 +442,48 @@
source orcallator
data tcp_Icn/s
data tcp_Ocn/s
+line_type area
+line_type line1
legend Input - Passive
legend Output - Active
y_legend Connections/s
data_min 0
data_max 10000
+href http://www.geocities.com/~bzking/docs/orcallator.html#TCP_new_connection_rate
}
plot {
title %g TCP Number Open Connections
source orcallator
data tcp_estb
+line_type area
legend # Open Connections
y_legend Number Open TCP Connections
data_min 0
data_max 50000
+href http://www.geocities.com/~bzking/docs/orcallator.html#TCP_number_open_connections
}
plot {
title %g TCP Reset Rate
source orcallator
data tcp_Rst/s
+line_type area
legend Number TCP Resets/s
y_legend Resets/s
data_min 0
+href http://www.geocities.com/~bzking/docs/orcallator.html#TCP_reset_rate
}
plot {
title %g TCP Attempt Fail Rate
source orcallator
data tcp_Atf/s
+line_type area
legend TCP Attempt Fails/s
y_legend Atf/s
data_min 0
+href http://www.geocities.com/~bzking/docs/orcallator.html#TCP_attempt_fail_rate
}
plot {
@@ -434,6 +496,7 @@
legend TCP Listen Drop Q0
legend TCP Half Open Drops
data_min 0
+href http://www.geocities.com/~bzking/docs/orcallator.html#TCP_listen_drop_rate
}
plot {
@@ -441,19 +504,24 @@
source orcallator
data smtx
data smtx/cpu
+line_type area
+line_type line1
legend Sleeps on mutex
legend Sleeps on mutex/cpu
y_legend Smtx/s
data_min 0
+href http://www.geocities.com/~bzking/docs/orcallator.html#sleeps_mutex_rate
}
plot {
title %g NFS Call Rate
source orcallator
data nfs_call/s
+line_type area
legend NFS Calls/s
y_legend Calls/s
data_min 0
+href http://www.geocities.com/~bzking/docs/orcallator.html#NFS_call_rate
}
plot {
@@ -461,23 +529,87 @@
source orcallator
data nfs_timo/s
data nfs_badx/s
+line_type area
+line_type line1
legend NFS Timeouts
legend Bad Transmits
y_legend Count/s
data_min 0
+href http://www.geocities.com/~bzking/docs/orcallator.html#NFS_timeouts_bad_transmits_rate
}
plot {
-title %g Peak & Mean Disk Busy
+title %g Disk System Wide Reads/Writes Per Second
source orcallator
-data disk_peak
-data disk_mean
+data disk_rd/s
+data disk_wr/s
+line_type area
line_type line1
+legend Reads/s
+legend Writes/s
+y_legend Ops/s
+data_min 0
+optional
+href http://www.geocities.com/~bzking/docs/orcallator.html#disk_system_wide_reads_writes_per_second
+}
+
+plot {
+title %g Disk System Wide Transfer Rate
+source orcallator
+data 1024 * disk_rK/s
+data 1024 * disk_wK/s
line_type area
-legend Peak Disk Busy
-legend Mean Disk Busy
-y_legend Disk Busy Measure
+line_type line1
+legend Read transfer rate
+legend Write transfer rate
+y_legend Bytes/s
+data_min 0
+optional
+href http://www.geocities.com/~bzking/docs/orcallator.html#disk_system_wide_transfer_rate
+}
+
+plot {
+title %g Disk Run Percent
+source orcallator
+data disk_runp_(c\d+t\d+d\d+)
+line_type line2
+legend $1
+y_legend Run Percent
+data_min 0
+data_max 100
+plot_min 0
+optional
+href http://www.geocities.com/~bzking/docs/orcallator.html#disk_run_percent
+}
+
+plot {
+title %g Disk Space Percent Usage
+source orcallator
+data mntP_(.*)
+line_type line2
+legend $1
+y_legend Percent
+data_min 0
+data_max 100
+plot_min 0
+plot_max 100
+optional
+href http://www.geocities.com/~bzking/docs/orcallator.html#disk_space_percent_usage
+}
+
+plot {
+title %g Disk Inode Percent Usage
+source orcallator
+data mntp_(.*)
+line_type line2
+legend $1
+y_legend Percent
data_min 0
+data_max 100
+plot_min 0
+plot_max 100
+optional
+href http://www.geocities.com/~bzking/docs/orcallator.html#disk_inode_percent_usage
}
plot {
@@ -485,11 +617,14 @@
source orcallator
data dnlc_hit%
data inod_hit%
+line_type area
+line_type line1
legend DNLC
legend Inode Cache
y_legend Percent
data_min 0
data_max 100
+href http://www.geocities.com/~bzking/docs/orcallator.html#cache_hit_percentages
}
plot {
@@ -497,39 +632,59 @@
source orcallator
data dnlc_ref/s
data inod_ref/s
-line_type line1
line_type area
+line_type line1
legend DNLC
legend Inode Cache
y_legend References/s
data_min 0
+href http://www.geocities.com/~bzking/docs/orcallator.html#cache_reference_rate
}
plot {
title %g Inode Steal Rate
source orcallator
data inod_stl/s
+line_type area
legend Inode w/page steals/s
y_legend Steals/s
data_min 0
+href http://www.geocities.com/~bzking/docs/orcallator.html#inode_steal_rate
}
plot {
title %g Available Swap Space
source orcallator
data 1024 * swap_avail
+line_type area
legend Available Swap Space
y_legend Bytes
+base 1024
data_min 0
+href http://www.geocities.com/~bzking/docs/orcallator.html#available_swap_space
}
plot {
title %g Page Residence Time
source orcallator
data page_rstim
+line_type area
legend Page Residence Time
y_legend Seconds
data_min 0
+href http://www.geocities.com/~bzking/docs/orcallator.html#page_residence_time
+}
+
+plot {
+title %g Page Scan Rate
+source orcallator
+data scanrate
+line_type area
+legend Page Scan Rate
+y_legend Pages/s
+data_min 0
+optional
+href http://www.geocities.com/~bzking/docs/orcallator.html#page_scan_rate
}
plot {
@@ -553,6 +708,7 @@
color 00ff00
color ff0000
color 0000ff
+href http://www.geocities.com/~bzking/docs/orcallator.html#page_usage
}
plot {
@@ -560,9 +716,12 @@
source orcallator
data pageslock
data pagesio
+line_type area
+line_type line1
legend Locked
legend IO
y_legend Number of Pages
data_min 0
plot_min 0
+href http://www.geocities.com/~bzking/docs/orcallator.html#pages_locked_IO
}
Modified: trunk/orca/orcallator/orcallator.se
==============================================================================
--- trunk/orca/orcallator/orcallator.se (original)
+++ trunk/orca/orcallator/orcallator.se Sat Jul 13 19:22:24 2002
@@ -4,18 +4,78 @@
// This program logs many different system quantities to a log file
// for later processing.
//
-// Author: Blair Zajac <bzajac@geostaff.com>
+// Author: Blair Zajac <bzajac@geostaff.com>.
//
// Portions copied from percollator.se written by Adrian Cockroft.
//
-// Version 1.7: Mar 25, 1999 Speed up by 20% and simplify count_proc.
-// Version 1.6: Feb 23, 1999 Print pvm.user_time and system_time correctly.
-// Version 1.5: Feb 23, 1999 Always write header to a new file.
-// Version 1.4: Feb 19, 1999 Handle missing HTTP/1.x in access log.
-// Version 1.3: Feb 18, 1999 On busy machines httpops5 will be enlarged.
-// Version 1.2: Feb 18, 1999 Output data on integral multiples of interval.
-// Version 1.1: Feb 18, 1999 Integrate Squid log processing from SE 3.1.
-// Version 1.0: Sep 9, 1998 Initial version.
+// Version 1.19: Oct 13, 1999 Prevent a division by zero in calculating the
+// mean_disk_busy if the number of disks on the
+// system is 0.
+// Version 1.18: Oct 12, 1999 Rename disk_runp.c?t?d? to disk_runp_c?t?d?
+// to remove the .'s.
+// Version 1.17: Oct 8, 1999 Do not record mount point statistics for
+// locally mounted /cdrom partitions.
+// Version 1.16: Oct 7, 1999 To keep backwards compatibility, define
+// WATCH_WEB if WATCH_HTTPD is defined.
+// If the COMPRESSOR environmental variable
+// is defined, then when a new log file is opened
+// for a new day, the just closed log file is
+// compressed using the COMPRESSOR command in the
+// following manner:
+// system(sprintf("%s %s &", COMPRESSOR, log_file))
+// COMPRESSOR should be set to something like
+// "gzip -9", or "compress", or "bzip2 -9".
+// Version 1.15: Oct 5, 1999 kvm$mpid is an int, not a long. This caused
+// problems on Solaris 7 hosts running a 64
+// bit kernel.
+// Version 1.14: Oct 1, 1999 Rename disk.c?t?d? column names to
+// disk_runp.c?t?d? to better reflect the data
+// being recorded and to allow for more per disk
+// information later.
+// Version 1.13: Sep 24, 1999 Fix a bug in the disk_mean calculation where
+// it was being divided by the wrong disk_count.
+// Now it should be much larger and in scale with
+// disk_peak. When WATCH_DISK is defined, now
+// print each disk's run percent. Add a new
+// define WATCH_MOUNTS, which reports each local
+// mount point's disk space and inode capacity,
+// usage, available for non-root users and
+// percent used. This comes from Duncan Lawie
+// tyger@hoopoes.com. Add some smarts so that if
+// the number of interfaces, physical disks, or
+// mounted partitions changes, then a new header
+// is printed. This will prevent column name and
+// data mixups when the system configuration
+// changes.
+// Version 1.12: Sep 14, 1999 Add the page scan rate as scanrate in
+// measure_cpu.
+// Version 1.11: Aug 13, 1999 Add the number of CPUs as ncpus. Move
+// measure_disk and measure_ram sooner in the
+// list of subsystems to handle. Increase the
+// number of characters for each network
+// interface from four to five. Add new disk
+// reads, writes, Kbytes read, and Kbytes
+// written per second. Add the amount of free
+// memory in kilobytes as freememK.
+// Version 1.10: Jul 28, 1999 Measure the process spawn rate if WATCH_CPU
+// is defined and the user is root.
+// Version 1.9: Jun 2, 1999 If WATCH_YAHOO is defined, then process the
+// access log as a Yahoo! style access log.
+// Restructure the code to handle different
+// web server access log formats.
+// Version 1.8: Jun 1, 1999 If the environmental variable WEB_SERVER is
+// defined, use its value as the name
+// of the process to count for the number of
+// web servers on the system. If WEB_SERVER
+// is not defined, then count the number of httpd's.
+// Version 1.7: Mar 25, 1999 Simplify and speed up count_proc by 20%.
+// Version 1.6: Feb 23, 1999 Print pvm.user_time and system_time correctly.
+// Version 1.5: Feb 23, 1999 Always write header to a new file.
+// Version 1.4: Feb 19, 1999 Handle missing HTTP/1.x in access log.
+// Version 1.3: Feb 18, 1999 On busy machines httpops5 will be enlarged.
+// Version 1.2: Feb 18, 1999 Output data on integral multiples of interval.
+// Version 1.1: Feb 18, 1999 Integrate Squid log processing from SE 3.1.
+// Version 1.0: Sep 9, 1998 Initial version.
//
// The default sampling interval in seconds.
@@ -25,16 +85,22 @@
// Define the different parts of the system you want to examine.
#ifdef WATCH_OS
-#define WATCH_CPU
-#define WATCH_MUTEX
-#define WATCH_NET
-#define WATCH_TCP
-#define WATCH_NFS
-#define WATCH_DISK
-#define WATCH_DNLC
-#define WATCH_INODE
-#define WATCH_RAM
-#define WATCH_PAGES
+#define WATCH_CPU 1
+#define WATCH_MUTEX 1
+#define WATCH_NET 1
+#define WATCH_TCP 1
+#define WATCH_NFS 1
+#define WATCH_MOUNTS 1
+#define WATCH_DISK 1
+#define WATCH_DNLC 1
+#define WATCH_INODE 1
+#define WATCH_RAM 1
+#define WATCH_PAGES 1
+#endif
+
+// Keep backwards compatibility with WATCH_HTTPD.
+#ifdef WATCH_HTTPD
+#define WATCH_WEB 1
#endif
#include <stdio.se>
@@ -54,9 +120,95 @@
#include <tcp_class.se>
#include <tcp_rules.se>
-#ifdef WATCH_HTTPD
+#ifdef WATCH_MOUNTS
+#include <mnt_class.se>
+#include <statvfs.se>
+#endif
+
+#if WATCH_CPU || WATCH_WEB
#include <proc.se>
+
+#ifdef WATCH_CPU
+// This is the maximum pid on Solaris hosts.
+#define DEFAULT_MAXPID 30000
+#include <fcntl.se>
+#endif
+
+#ifdef WATCH_WEB
#include <stat.se>
+
+// Define this macro which returns the size index for a file of a
+// particular size. This saves the overhead of a function call.
+#define WWW_SIZE_INDEX(size, size_index) \
+ if (size < 1024) { \
+ size_index=0; /* under 1KB */ \
+ } else { \
+ if (size < 10240) { \
+ size_index=1; /* under 10K */ \
+ } else { \
+ if (size < 102400) { \
+ size_index=2; /* under 100KB */ \
+ } else { \
+ if (size < 1048576) { \
+ size_index=3; /* under 1MB */ \
+ } else { \
+ size_index=4; /* over 1MB */ \
+ } \
+ } \
+ } \
+ } \
+ dwnld_size[size_index]++;
+
+// Handle the reply code from the server.
+#define WWW_REPLY_CODE(word) \
+ if (word != nil) { \
+ if (word == "304") { \
+ httpop_condgets++; \
+ } \
+ else { \
+ first_byte = word; \
+ if (first_byte[0] == '4' || first_byte[0] == '5') { \
+ httpop_errors++; \
+ } \
+ } \
+ }
+
+// Handle the method of the object served. This define only works with
+// non-proxy servers.
+#define WWW_METHOD1(word) \
+ if (word != nil) { \
+ switch (word) { \
+ case "get": \
+ case "GET": \
+ httpop_gets++; \
+ break; \
+ case "post": \
+ case "POST": \
+ httpop_posts++; \
+ break; \
+ case "head": \
+ case "HEAD": \
+ ishead = 1; \
+ httpop_condgets++; \
+ break;
+
+#ifdef WATCH_SQUID
+#define WWW_METHOD2 \
+ case "icp_query": \
+ case "ICP_QUERY": \
+ squid_icp_queries++; \
+ break;
+#else
+#define WWW_METHOD2
+#endif
+
+#define WWW_METHOD_END \
+ default: \
+ break; \
+ } \
+ }
+#define WWW_METHOD(word) WWW_METHOD1(word) WWW_METHOD2 WWW_METHOD_END
+#endif
#endif
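
The WWW_SIZE_INDEX and WWW_REPLY_CODE macros above are written as macros purely to avoid per-line function-call overhead while scanning the access log. A minimal standalone C sketch of the same logic, with the function and counter names chosen for illustration only:

    #include <string.h>

    /* Same thresholds as WWW_SIZE_INDEX: <1KB, <10KB, <100KB, <1MB, >=1MB. */
    static int size_bucket(long size)
    {
        if (size < 1024)    return 0;
        if (size < 10240)   return 1;
        if (size < 102400)  return 2;
        if (size < 1048576) return 3;
        return 4;
    }

    /* Same classification as WWW_REPLY_CODE: a 304 counts as a conditional
       GET, any 4xx or 5xx status counts as an error. */
    static void classify_reply(const char *code, long *cond_gets, long *errors)
    {
        if (code == NULL)
            return;
        if (strcmp(code, "304") == 0)
            (*cond_gets)++;
        else if (code[0] == '4' || code[0] == '5')
            (*errors)++;
    }

A caller would then bump a five-element histogram with something like dwnld_hist[size_bucket(bytes_sent)]++.
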
// Put all rules here so they can be accessed by the handle functions.
@@ -94,77 +246,116 @@
ks_system_misc tmp_kstat_misc;
// Put application globals here.
-string nodename; // Name of machine running the script.
-string program_name; // Name of this program.
-int hz; // Clock tick rate.
-int page_size; // Page size in bytes.
-long boot_time; // Boot time of the system.
-long interval = SAMPLE_INTERVAL; // Sampling interval.
+string nodename; // Name of this machine.
+string program_name; // Name of this program.
+int hz; // Clock tick rate.
+int page_size; // Page size in bytes.
+long boot_time; // Boot time of the system.
+long interval = SAMPLE_INTERVAL; // Sampling interval.
-// Variables for handling the httpd access log.
-#ifdef WATCH_HTTPD
-ulong log_file;
-string log_name = getenv("WEB_LOG");
-string log_gateway = getenv("GATEWAY");
-uint log_gatelen;
-stat_t log_stat[1];
-ulong log_ino;
-long log_size;
-
-double log_interval; // Hi-res interval time.
-ulonglong log_then;
-ulonglong log_now;
-
-double log5_interval; // Actual hi-res 5 second interval.
-ulonglong log5_then;
-ulonglong log5_now;
-
-double httpops;
-double httpops5;
-double gateops;
-double dtmp;
-
-long httpop_gets;
-long httpop_condgets; // HEAD or code = 304 conditional get no data.
-long httpop_posts;
-long httpop_cgi_bins;
-long httpop_searches;
-long httpop_errors;
-string search_url;
-long dwnld_size[5]; // [0] < 1K, [1] < 10K, [2] < 100K, [3] < 1M, [4] >= 1M
-long dwnld_totalz; // total size counted from log
+#ifdef WATCH_CPU
+int can_read_kernel = 0; // If the kernel can be read.
+int kvm$mpid; // The last created PID.
+// These variables store the mpid before and after the standard interval.
+int mpid_previous;
+int mpid_current;
+ulonglong mpid_then;
+ulonglong mpid_now;
+
+// These variables store the mpid before and after 5 second intervals.
+int mpid5_previous;
+int mpid5_current;
+ulonglong mpid5_then;
+ulonglong mpid5_now;
+double mpid5_rate;
+#endif
+
+#ifdef WATCH_MOUNTS
+mnttab_t mnt$mnt;
+mnttab_t tmp_mnt;
+#endif
+
+// Variables for handling the httpd access log.
+#ifdef WATCH_WEB
+string www_search_url = getenv("SEARCHURL");
+string www_server_proc_name = getenv("WEB_SERVER");
+string www_log_filename = getenv("WEB_LOG");
+string www_gateway = getenv("GATEWAY");
+ulong www_fd;
+uint www_gatelen;
+stat_t www_stat[1];
+ulong www_ino;
+long www_size;
+
+double www_interval; // Hi-res interval time.
+ulonglong www_then;
+ulonglong www_now;
+
+double www5_interval; // Actual hi-res 5 second interval.
+ulonglong www5_then;
+ulonglong www5_now;
+
+double httpops;
+double httpops5;
+double gateops;
+double dtmp;
+
+long httpop_gets;
+long httpop_condgets; // HEAD or code = 304 conditional get no data.
+long httpop_posts;
+long httpop_cgi_bins;
+long httpop_searches;
+long httpop_errors;
+long dwnld_size[5]; // [0] < 1K, [1] < 10K, [2] < 100K, [3] < 1M, [4] >= 1M
+long dwnld_totalz; // total size counted from log
+
+#if WATCH_PROXY || WATCH_SQUID || WATCH_YAHOO
+// If we're watching a Yahoo log, then take the transfer time to be the
+// processing time.
+double www_dwnld_time_sum; // transfer time
+double www_dwnld_time_by_size[5]; // mean transfer time by size bin
+#endif
#if WATCH_PROXY || WATCH_SQUID
-double prxy_squid_xfer_sum; // transfer time
-double prxy_squid_xfer_by_size[5]; // mean transfer time by size bin
-long prxy_squid_indirect; // number of hits that go via PROXY,SOCKS,parent
-long prxy_squid_cache_hits; // number of hits returned from cache
+long prxy_squid_indirect; // number of hits that go via PROXY,SOCKS,parent
+long prxy_squid_cache_hits; // number of hits returned from cache
#endif
#ifdef WATCH_PROXY
-long prxy_cache_writes; // number of writes and updates to cache
-long prxy_uncacheable; // number of explicitly uncacheable httpops
- // any extra is errors or incomplete ops
+long prxy_cache_writes; // number of writes and updates to cache
+long prxy_uncacheable; // number of explicitly uncacheable httpops
+ // any extra is errors or incomplete ops
#endif
#ifdef WATCH_SQUID
-long squid_cache_misses;
-long squid_icp_requests;
-long squid_icp_queries;
-long squid_client_http;
+long squid_cache_misses;
+long squid_icp_requests;
+long squid_icp_queries;
+long squid_client_http;
#endif
#endif
// Variables for handling output.
-string col_comment[MAX_COLUMNS]; // Comments for each column.
-string col_data[MAX_COLUMNS]; // Data for each column.
-int current_column; // The current column.
+string compress = getenv("COMPRESSOR"); // How to compress logs.
+ulong ofile; // File pointer to the logging file.
+string col_comment[MAX_COLUMNS]; // Comments for each column.
+string col_data[MAX_COLUMNS]; // Data for each column.
+int current_column = 0; // The current column.
+int print_header = 1; // Flag to flush header.
-// Reset the output data.
-reset_output()
+// Send the stored columns of information to the output.
+print_columns(string data[])
{
- current_column = 0;
+ int i;
+ for (i=0; i<current_column; i++) {
+ fprintf(ofile, "%s", data[i]);
+ if (i != current_column-1) {
+ fputc(' ', ofile);
+ }
+ }
+ fputc('\n', ofile);
+ fflush(ofile);
}
// Add one column of comments and data to the buffers.
@@ -181,64 +372,61 @@
++current_column;
}
-print_columns(ulong fd, string data[])
-{
- int i;
- for (i=0; i<current_column; ++i) {
- fprintf(fd, "%s", data[i]);
- if (i != current_column-1) {
- fputc(' ', fd);
- }
+flush_output() {
+ if (print_header != 0) {
+ print_columns(col_comment);
+ print_header = 0;
}
- fputc('\n', fd);
- fflush(fd);
+ print_columns(col_data);
+ current_column = 0;
}
-/* returns output file - creates or appends to logfile if OUTDIR is set
- returns stdout and writes header if no OUTDIR
- starts new logfile each day
-*/
-ulong checkoutput(tm_t now) {
+// Sets ofile to the output file pointer. Creates or appends to the
+// log file if OUTDIR is set, otherwise sets the file pointer to STDOUT.
+// It starts a new log file each day. It compresses the previous day's
+// log file if the environmental variable COMPRESSOR is set.
+checkoutput(tm_t now) {
string outdir = getenv("OUTDIR");
string outname;
- ulong ofile;
tm_t then;
char tm_buf[32];
if (outdir == nil) {
- /* no output dir so use stdout */
+ // No output dir so use stdout.
if (ofile == 0) {
- /* first time, so print header and set ofile */
+ // First time, so print header and set ofile.
ofile = stdout;
- print_columns(ofile, col_comment);
+ print_header = 1;
}
- return ofile;
+ return;
}
- /* maintain daily output logfiles in OUTDIR */
+ // Maintain daily output logfiles in OUTDIR.
if (now.tm_yday != then.tm_yday) {
- /* first time or day has changed, start new logfile */
+ // First time or day has changed, start new logfile.
if (ofile != 0) {
- /* close existing output file */
+ // Close and optionally compress the existing output file.
fclose(ofile);
+ if (compress != nil) {
+ system(sprintf(compress, outname));
+ }
}
strftime(tm_buf, sizeof(tm_buf), "%Y-%m-%d", now);
outname = sprintf("%s/percol-%s", outdir, tm_buf);
- ofile = fopen(outname, "a"); /* open for append either way */
+ // Open for append either way.
+ ofile = fopen(outname, "a");
if (ofile == 0) {
perror("can't open output logfile");
exit(1);
}
// Always write header.
- print_columns(ofile, col_comment);
+ print_header = 1;
then = now;
}
- return ofile;
}
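
The compression hook in checkoutput() relies on a command template built once in initialize() further down, where the COMPRESSOR value (for example "gzip -9") becomes the format string "gzip -9 %s &"; the template is then filled in with the name of the log file that was just closed. A rough C equivalent of that two-step pattern, using hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>

    static char cmd_fmt[256];                 /* e.g. "gzip -9 %s &" */

    static void init_compressor(void)
    {
        const char *c = getenv("COMPRESSOR");
        if (c != NULL && c[0] != '\0')
            snprintf(cmd_fmt, sizeof(cmd_fmt), "%s %%s &", c);
    }

    static void compress_old_log(const char *old_logname)
    {
        char cmd[512];
        if (cmd_fmt[0] == '\0')
            return;                           /* COMPRESSOR not set */
        snprintf(cmd, sizeof(cmd), cmd_fmt, old_logname);
        system(cmd);                          /* '&' keeps it in the background */
    }
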
int main(int argc, string argv[])
{
utsname_t u[1];
- ulong ofile; // File pointer to the logging file.
long now;
long sleep_till; // Time to sleep to.
tm_t tm_now;
@@ -259,24 +447,28 @@
default:
fprintf(stderr, "usage: se [Defines] %s [interval]\n", program_name);
fprintf(stderr, "%s can use the following environmental variables:\n", program_name);
+ fprintf(stderr, " setenv OUTDIR /var/orcallator/logs - log file directory, default stdout\n");
+ fprintf(stderr, " setenv WEB_SERVER apache - string to search for number of web servers\n");
fprintf(stderr, " setenv WEB_LOG /ns-home/httpd-80/logs/access - location of web server log\n");
fprintf(stderr, " setenv GATEWAY some.where.com - special address to monitor\n");
- fprintf(stderr, " setenv OUTDIR /ns-home/docs/orcallator/logs - default stdout\n");
fprintf(stderr, " setenv SEARCHURL srch.cgi - match for search scripts, default is search.cgi\n");
+ fprintf(stderr, " setenv COMPRESSOR \"gzip -9\" - compress previous day logs using this command\n");
fprintf(stderr, "Defines:\n");
- fprintf(stderr, " -DWATCH_HTTPD watch httpd access log\n");
- fprintf(stderr, " -DWATCH_PROXY use WEB_LOG as a NCSA style proxy log\n");
- fprintf(stderr, " -DWATCH_SQUID use WEB_LOG as a Squid log\n");
- fprintf(stderr, " -DWATCH_OS includes all of the below:\n");
- fprintf(stderr, " -DWATCH_CPU watch the cpu load, run queue, etc\n");
- fprintf(stderr, " -DWATCH_MUTEX watch the number of mutex spins\n");
- fprintf(stderr, " -DWATCH_NET watch all Ethernet interfaces\n");
- fprintf(stderr, " -DWATCH_TCP watch all the TCP/IP stack\n");
- fprintf(stderr, " -DWATCH_NFS watch NFS requests\n");
- fprintf(stderr, " -DWATCH_DNLC watch the directory name lookup cache\n");
- fprintf(stderr, " -DWATCH_INODE watch the inode cache\n");
- fprintf(stderr, " -DWATCH_RAM watch memory usage\n");
- fprintf(stderr, " -DWATCH_PAGES watch where pages are allocated\n");
+ fprintf(stderr, " -DWATCH_WEB watch web server access logs\n");
+ fprintf(stderr, " -DWATCH_PROXY use WEB_LOG as a NCSA style proxy log\n");
+ fprintf(stderr, " -DWATCH_SQUID use WEB_LOG as a Squid log\n");
+ fprintf(stderr, " -DWATCH_OS includes all of the below:\n");
+ fprintf(stderr, " -DWATCH_CPU watch the cpu load, run queue, etc\n");
+ fprintf(stderr, " -DWATCH_MUTEX watch the number of mutex spins\n");
+ fprintf(stderr, " -DWATCH_NET watch all Ethernet interfaces\n");
+ fprintf(stderr, " -DWATCH_TCP watch all the TCP/IP stack\n");
+ fprintf(stderr, " -DWATCH_NFS watch NFS requests\n");
+ fprintf(stderr, " -DWATCH_MOUNTS watch usage of mount points\n");
+ fprintf(stderr, " -DWATCH_DISK watch disk read/write usage\n");
+ fprintf(stderr, " -DWATCH_DNLC watch the directory name lookup cache\n");
+ fprintf(stderr, " -DWATCH_INODE watch the inode cache\n");
+ fprintf(stderr, " -DWATCH_RAM watch memory usage\n");
+ fprintf(stderr, " -DWATCH_PAGES watch where pages are allocated\n");
exit(1);
break;
}
@@ -284,7 +476,7 @@
// Initialize the various structures.
initialize();
- // Run forever. If WATCH_HTTPD is defined, then have measure_httpd()
+ // Run forever. If WATCH_WEB is defined, then have measure_web()
// do the sleeping while it is watching the access log file until the
// next update time for the whole operating system. Also, collect the
// data from the access log file before printing any output.
@@ -292,90 +484,123 @@
// Calculate the next time to sleep to that is an integer multiple of
// the interval time. Make sure that at least half of the interval
// passes before waking up.
- now = time(0);
+ now = time(0);
sleep_till = (now/interval)*interval;
- while (now + interval*0.5 > sleep_till) {
+ while (sleep_till < now + interval*0.5) {
sleep_till += interval;
}
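
The two lines above align each wake-up to an integer multiple of the sampling interval while guaranteeing that at least half an interval elapses first. A small C restatement of the same arithmetic (the 300-second interval in the comment is only an example):

    /* With interval = 300 and now = 1000: 1000/300*300 = 900, which is
       less than 1000 + 150, so the loop advances it to 1200. */
    static long next_wakeup(long now, long interval)
    {
        long sleep_till = (now / interval) * interval;
        while (sleep_till < now + interval * 0.5)
            sleep_till += interval;
        return sleep_till;
    }
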
-#ifdef WATCH_HTTPD
- measure_httpd(sleep_till);
+#ifdef WATCH_WEB
+ measure_web(sleep_till);
#else
- while (now < sleep_till) {
- sleep(sleep_till - now);
- now = time(0);
- }
+ sleep_till_and_count_new_proceses(sleep_till);
#endif
- // Reset the output.
- reset_output();
-
// Get the current time.
now = time(0);
tm_now = localtime(&now);
- handle_os(now, tm_now);
+ measure_os(now, tm_now);
-#ifdef WATCH_HTTPD
+#ifdef WATCH_WEB
put_httpd();
#endif
- // Get a filedescriptor to write to. Maintains daily output files.
- ofile = checkoutput(tm_now);
+ // Get a file descriptor to write to. Maintains daily output files.
+ checkoutput(tm_now);
// Print the output.
- print_columns(ofile, col_data);
+ flush_output();
}
return 0;
}
initialize()
{
- // Sleep to give the disks a chance to update.
- sleep(DISK_UPDATE_RATE);
+#ifdef WATCH_CPU
+ int i;
+#endif
- // Get the clock tick rate.
- hz = sysconf(_SC_CLK_TCK);
+ // Get the command to compress the log files.
+ if (compress == nil || compress == "") {
+ compress = nil;
+ }
+ else {
+ compress = sprintf("%s %%s &", compress);
+ }
- // Get the page size.
- page_size = sysconf(_SC_PAGESIZE);
+#ifdef WATCH_CPU
+ // Initialize the process spawning rate measurement variables.
+ // Determine if the kernel can be read to measure the last pid.
+ i = open("/dev/kmem", O_RDONLY);
+ if (i != -1) {
+ close(i);
+ can_read_kernel = 1;
+ mpid_previous = kvm$mpid;
+ mpid_then = gethrtime();
+ mpid_current = mpid_previous;
+
+ mpid5_then = mpid_then;
+ mpid5_previous = mpid_previous;
+ mpid5_current = mpid_previous;
+ mpid5_rate = 0;
+ }
+#endif
- // Calculate the system boot time.
- boot_time = time(0) - (kstat$misc.clk_intr / hz);
+#ifdef WATCH_WEB
+ // Initialize those variables that were not set with environmental
+ // variables.
+ if (www_search_url == nil || www_search_url == "") {
+ www_search_url = "search.cgi";
+ }
-#ifdef WATCH_HTTPD
- search_url = getenv("SEARCHURL");
- if (search_url == nil) {
- search_url = "search.cgi";
+ if (www_server_proc_name == nil || www_server_proc_name == "") {
+ www_server_proc_name = "httpd";
}
- if (log_gateway == nil) {
- log_gateway = "NoGatway";
- log_gatelen = 0;
+ if (www_gateway == nil || www_gateway == "" ) {
+ www_gateway = "NoGatway";
+ www_gatelen = 0;
}
else {
- log_gatelen = strlen(log_gateway);
+ www_gatelen = strlen(www_gateway);
}
- log_file = fopen(log_name, "r");
- if (log_file != 0) {
- stat(log_name, log_stat);
- log_ino = log_stat[0].st_ino;
- log_size = log_stat[0].st_size;
- // Move to the end of the file.
- fseek(log_file, 0, 2);
+ // Initialize the web server watching variables. Move the file pointer
+ // to the end of the web access log and note the current time.
+ if (www_log_filename != nil) {
+ www_fd = fopen(www_log_filename, "r");
+ if (www_fd != 0) {
+ stat(www_log_filename, www_stat);
+ www_ino = www_stat[0].st_ino;
+ www_size = www_stat[0].st_size;
+ // Move to the end of the file.
+ fseek(www_fd, 0, 2);
+ }
}
- log_then = gethrtime();
- log5_then = log_then;
+ www_then = gethrtime();
+ www5_then = www_then;
#endif
+ // Sleep to give the disks a chance to update.
+ sleep(DISK_UPDATE_RATE);
+
+ // Get the clock tick rate.
+ hz = sysconf(_SC_CLK_TCK);
+
+ // Get the page size.
+ page_size = sysconf(_SC_PAGESIZE);
+
+ // Calculate the system boot time.
+ boot_time = time(0) - (kstat$misc.clk_intr / hz);
+
// Perform the first measurement of the system.
- measure_os();
+ _measure_os();
}
// Measure the system statistics all at once.
-measure_os()
+_measure_os()
{
tmp_lrcpu = lr_cpu$cpu;
tmp_mutex = lr_mutex$m;
@@ -397,62 +622,67 @@
tmp_kstat_misc = kstat$misc;
}
-handle_os(long now, tm_t tm_now)
+measure_os(long now, tm_t tm_now)
{
// Measure the system now.
- measure_os();
+ _measure_os();
// Take care of miscellaneous measurements.
- handle_misc(now, tm_now);
+ measure_misc(now, tm_now);
// Take care of cpu.
#ifdef WATCH_CPU
- handle_cpu();
+ measure_cpu();
#endif
// Take care of mutexes.
#ifdef WATCH_MUTEX
- handle_mutex();
+ measure_mutex();
+#endif
+
+ // Take care of mount points.
+#ifdef WATCH_MOUNTS
+ measure_mounts();
+#endif
+
+ // Take care of the disks.
+#ifdef WATCH_DISK
+ measure_disk();
+#endif
+
+ // Take care of ram.
+#ifdef WATCH_RAM
+ measure_ram();
#endif
// Take care of the network.
#ifdef WATCH_NET
- handle_net();
+ measure_net();
#endif
// Take care of TCP/IP.
#ifdef WATCH_TCP
- handle_tcp();
+ measure_tcp();
#endif
// Take care of NFS.
#ifdef WATCH_NFS
- handle_nfs();
-#endif
-
- // Take care of the disks.
-#ifdef WATCH_DISK
- handle_disk();
+ measure_nfs();
#endif
// Take care of DNLC.
#ifdef WATCH_DNLC
- handle_dnlc();
+ measure_dnlc();
#endif
// Take care of the inode cache.
#ifdef WATCH_INODE
- handle_inode();
-#endif
-
- // Take care of ram.
-#ifdef WATCH_RAM
- handle_ram();
+ measure_inode();
#endif
// Take care of page allocations.
#ifdef WATCH_PAGES
- handle_pages();
+ measure_pages();
#endif
}
@@ -469,7 +699,7 @@
}
}
-handle_misc(long now, tm_t tm_now)
+measure_misc(long now, tm_t tm_now)
{
long uptime;
char states[12];
@@ -493,69 +723,153 @@
put_output(" timestamp", sprintf("%10d", now));
put_output("locltime", tm_buf);
- put_output(" uptime", sprintf("%8d", uptime));
put_output("DNnsrkcmdit", states);
+ put_output(" uptime", sprintf("%8d", uptime));
+}
+
+sleep_till_and_count_new_proceses(long sleep_till)
+{
+ long now;
+#ifdef WATCH_CPU
+ long sleep_till1;
+ int mpid5_diff;
+ double mpid5_interval;
+ double rate;
+#endif
+
+ now = time(0);
+ while (now < sleep_till) {
+#ifdef WATCH_CPU
+ if (can_read_kernel != 0) {
+ // Sleep at least 5 seconds to make a measurement.
+ sleep_till1 = now + 5;
+ while (now < sleep_till1) {
+ sleep(sleep_till1 - now);
+ now = time(0);
+ }
+
+ // Measure the 5 second process creation rate.
+ mpid5_current = kvm$mpid;
+ mpid5_now = gethrtime();
+ mpid5_interval = (mpid5_now - mpid5_then) * 0.000000001;
+ mpid5_then = mpid5_now;
+ if (mpid5_current >= mpid5_previous) {
+ mpid5_diff = mpid5_current - mpid5_previous;
+ }
+ else {
+ mpid5_diff = mpid5_current + DEFAULT_MAXPID - mpid5_previous;
+ }
+ rate = mpid5_diff/mpid5_interval;
+ if (rate > mpid5_rate) {
+ mpid5_rate = rate;
+ }
+ mpid5_previous = mpid5_current;
+
+ // Now take these results to measure the long interval rate.
+ // Because the mpid may flip over DEFAULT_MAXPID more than once
+ // in the long interval time span, use the difference between
+ // the previous and current mpid over a 5 second interval to
+ // calculate the long interval difference.
+ mpid_current += mpid5_diff;
+ mpid_now = mpid5_now;
+ }
+ else {
+ sleep(sleep_till - now);
+ }
+#else
+ sleep(sleep_till - now);
+#endif
+ now = time(0);
+ }
}
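
Because kvm$mpid wraps around once it passes the system's maximum pid, the 5-second delta above cannot be a simple subtraction. A minimal C sketch of the wrap-aware difference and the resulting spawn rate, with MAXPID standing in for DEFAULT_MAXPID:

    #define MAXPID 30000                      /* stand-in for DEFAULT_MAXPID */

    /* New processes between two samples of the last-created pid, allowing
       for one wrap past MAXPID. */
    static int pid_delta(int previous, int current)
    {
        if (current >= previous)
            return current - previous;
        return current + MAXPID - previous;
    }

    /* Spawn rate in processes/second over an interval given in seconds. */
    static double spawn_rate(int previous, int current, double interval)
    {
        return pid_delta(previous, current) / interval;
    }
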
#ifdef WATCH_CPU
-handle_cpu()
+measure_cpu()
{
p_vmstat pvm;
+ double mpid_interval;
+ double mpid_rate;
pvm = vmglobal_total();
// In SE 3.0 user_time and system_time are int and in SE 3.1 they are
// double, so cast everything to double using + 0.0.
- put_output(" usr%", sprintf("%5.1f", pvm.user_time + 0.0));
- put_output(" sys%", sprintf("%5.1f", pvm.system_time + 0.0));
- put_output(" 1load", sprintf("%6.2f", tmp_kstat_misc.avenrun_1min/256.0));
- put_output(" 5load", sprintf("%6.2f", tmp_kstat_misc.avenrun_5min/256.0));
- put_output("15load", sprintf("%6.2f", tmp_kstat_misc.avenrun_15min/256.0));
- put_output(" 1runq", sprintf("%6.2f", tmp_kstat_misc.avenrun_1min/256.0));
- put_output(" 5runq", sprintf("%6.2f", tmp_kstat_misc.avenrun_5min/256.0));
- put_output("15runq", sprintf("%6.2f", tmp_kstat_misc.avenrun_15min/256.0));
- put_output("#proc", sprintf("%5lu", tmp_kstat_misc.nproc));
+ put_output(" usr%", sprintf("%5.1f", pvm.user_time + 0.0));
+ put_output(" sys%", sprintf("%5.1f", pvm.system_time + 0.0));
+ put_output(" 1runq", sprintf("%6.2f", tmp_kstat_misc.avenrun_1min/256.0));
+ put_output(" 5runq", sprintf("%6.2f", tmp_kstat_misc.avenrun_5min/256.0));
+ put_output("15runq", sprintf("%6.2f", tmp_kstat_misc.avenrun_15min/256.0));
+ put_output("#proc", sprintf("%5lu", tmp_kstat_misc.nproc));
+ put_output("scanrate", sprintf("%8.3f", pvm.scan));
+
+ // Calculate the rate of new process spawning.
+ if (can_read_kernel != 0) {
+ mpid_interval = (mpid_now - mpid_then) * 0.000000001;
+ mpid_rate = (mpid_current - mpid_previous) / mpid_interval;
+ put_output("#proc/s", sprintf("%7.3f", mpid_rate));
+ put_output("#proc/p5s", sprintf("%9.4f", mpid5_rate));
+
+ // Reset counters.
+ mpid_then = mpid_now;
+ mpid_previous = mpid_current;
+ mpid5_rate = 0;
+ }
}
#endif
#ifdef WATCH_MUTEX
-handle_mutex()
+measure_mutex()
{
put_output(" smtx", sprintf("%5d", tmp_mutex.smtx));
put_output("smtx/cpu", sprintf("%8d", tmp_mutex.smtx/tmp_mutex.ncpus));
+ put_output("ncpus", sprintf("%5d", tmp_mutex.ncpus));
}
#endif
#ifdef WATCH_NET
-handle_net()
+measure_net()
{
- int i;
+ int previous_count = -1;
+ int current_count;
+ int i;
- for (i=0; i<tmp_nr.net_count; ++i) {
- put_output(sprintf("%4sIpkt/s", tmp_nr.names[i]),
- sprintf("%10.3f", GLOBAL_net[i].ipackets));
- put_output(sprintf("%4sOpkt/s", tmp_nr.names[i]),
- sprintf("%10.3f", GLOBAL_net[i].opackets));
- put_output(sprintf("%4sInKB/s", tmp_nr.names[i]),
- sprintf("%10.3f", GLOBAL_net[i].ioctets/1024.0));
- put_output(sprintf("%4sOuKB/s", tmp_nr.names[i]),
- sprintf("%10.3f", GLOBAL_net[i].ooctets/1024.0));
- put_output(sprintf("%4sIErr/s", tmp_nr.names[i]),
- sprintf("%10.3f", GLOBAL_net[i].ierrors));
- put_output(sprintf("%4sOErr/s", tmp_nr.names[i]),
- sprintf("%10.3f", GLOBAL_net[i].oerrors));
- put_output(sprintf("%4sColl%%", tmp_nr.names[i]),
- sprintf("%9.3f", GLOBAL_net[i].collpercent));
- put_output(sprintf("%4sNoCP/s", tmp_nr.names[i]),
- sprintf("%10.3f", GLOBAL_net[i].nocanput));
- put_output(sprintf("%4sDefr/s", tmp_nr.names[i]),
- sprintf("%10.3f", GLOBAL_net[i].defer));
+ current_count = 0;
+ for (i=0; i<tmp_nr.net_count; i++) {
+ // Skip unused interfaces.
+// if (GLOBAL_net[i].up == 0) {
+// continue;
+// }
+ ++current_count;
+ put_output(sprintf("%5sIpkt/s", tmp_nr.names[i]),
+ sprintf("%11.3f", GLOBAL_net[i].ipackets));
+ put_output(sprintf("%5sOpkt/s", tmp_nr.names[i]),
+ sprintf("%11.3f", GLOBAL_net[i].opackets));
+ put_output(sprintf("%5sInKB/s", tmp_nr.names[i]),
+ sprintf("%11.3f", GLOBAL_net[i].ioctets/1024.0));
+ put_output(sprintf("%5sOuKB/s", tmp_nr.names[i]),
+ sprintf("%11.3f", GLOBAL_net[i].ooctets/1024.0));
+ put_output(sprintf("%5sIErr/s", tmp_nr.names[i]),
+ sprintf("%11.3f", GLOBAL_net[i].ierrors));
+ put_output(sprintf("%5sOErr/s", tmp_nr.names[i]),
+ sprintf("%11.3f", GLOBAL_net[i].oerrors));
+ put_output(sprintf("%5sColl%%", tmp_nr.names[i]),
+ sprintf("%10.3f", GLOBAL_net[i].collpercent));
+ put_output(sprintf("%5sNoCP/s", tmp_nr.names[i]),
+ sprintf("%11.3f", GLOBAL_net[i].nocanput));
+ put_output(sprintf("%5sDefr/s", tmp_nr.names[i]),
+ sprintf("%11.3f", GLOBAL_net[i].defer));
+ }
+
+ // If the number of up interfaces changes, then print new headers.
+ if (current_count != previous_count) {
+ print_header = 1;
+ previous_count = current_count;
}
}
#endif
#ifdef WATCH_TCP
-handle_tcp()
+measure_tcp()
{
put_output("tcp_Iseg/s", sprintf("%10.3f", tmp_tcp.InDataSegs));
put_output("tcp_Oseg/s", sprintf("%10.3f", tmp_tcp.OutDataSegs));
@@ -575,7 +889,7 @@
#endif
#ifdef WATCH_NFS
-handle_nfs()
+measure_nfs()
{
put_output("nfs_call/s", sprintf("%10.3f", tmp_lrpcc.calls));
put_output("nfs_timo/s", sprintf("%10.3f", tmp_lrpcc.timeouts));
@@ -583,30 +897,142 @@
}
#endif
+#ifdef WATCH_MOUNTS
+measure_mounts()
+{
+ statvfs_t vfs_array[1];
+ statvfs_t vfs;
+ string comment_fmt;
+ string kbytes_fmt;
+ string inode_fmt;
+ string percent_fmt;
+ ulong kbytes_used;
+ ulong inodes_used;
+ double block_factor;
+ int comment_length;
+ int previous_count = -1;
+ int current_count;
+
+ current_count = 0;
+ // Traverse the mount table to find mounted ufs/vxfs file systems.
+ for (mnt$mnt.number$=0; mnt$mnt.number$ != -1; mnt$mnt.number$++) {
+ tmp_mnt = mnt$mnt;
+ if (tmp_mnt.mnt_fstype == "ufs" || tmp_mnt.mnt_fstype == "vxfs") {
+ // Skip locally mounted /cdrom partitions.
+ if (tmp_mnt.mnt_mountp =~ "^/cdrom/") {
+ continue;
+ }
+ if (statvfs(tmp_mnt.mnt_mountp, vfs_array) == -1) {
+ continue;
+ }
+ vfs = vfs_array[0];
+ ++current_count;
+
+ // Generate the format strings for the comment and for the data.
+ comment_fmt = sprintf("mnt%%c_%s", tmp_mnt.mnt_mountp);
+ comment_length = strlen(comment_fmt) - 1;
+ kbytes_fmt = sprintf("%%%d.0f", comment_length);
+ inode_fmt = sprintf("%%%dld", comment_length);
+ percent_fmt = sprintf("%%%d.3f", comment_length);
+
+ // Calculate the number of 1 kilobyte blocks on the disk.
+ block_factor = vfs.f_frsize/1024;
+
+ // Capital letters refer to the disk usage in kilobytes. Lower case
+ // letters refer to inode usage.
+ // C - Capacity of the disk.
+ // U - Used capacity.
+ // A - Available capacity for non-root users.
+ // P - Percent used.
+ kbytes_used = vfs.f_blocks - vfs.f_bfree;
+ inodes_used = vfs.f_files - vfs.f_ffree;
+
+ put_output(sprintf(comment_fmt, 'C'),
+ sprintf(kbytes_fmt, block_factor*vfs.f_blocks));
+ put_output(sprintf(comment_fmt, 'U'),
+ sprintf(kbytes_fmt, block_factor*kbytes_used));
+ put_output(sprintf(comment_fmt, 'A'),
+ sprintf(kbytes_fmt, block_factor*vfs.f_bavail));
+ put_output(sprintf(comment_fmt, 'P'),
+ sprintf(percent_fmt,
+ 100.0*kbytes_used/(vfs.f_blocks + vfs.f_bavail - vfs.f_bfree)));
+
+ put_output(sprintf(comment_fmt, 'c'),
+ sprintf(inode_fmt, vfs.f_files));
+ put_output(sprintf(comment_fmt, 'u'),
+ sprintf(inode_fmt, inodes_used));
+ put_output(sprintf(comment_fmt, 'a'),
+ sprintf(inode_fmt, vfs.f_favail));
+ put_output(sprintf(comment_fmt, 'p'),
+ sprintf(percent_fmt,
+ 100.0*inodes_used/(vfs.f_files + vfs.f_favail - vfs.f_ffree)));
+
+ }
+ }
+
+ // If the number of mounted filesystems changes, then print new headers.
+ if (current_count != previous_count) {
+ print_header = 1;
+ previous_count = current_count;
+ }
+}
+#endif
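
measure_mounts() reports df-style percentages: used blocks divided by the blocks visible to ordinary users (total minus the root-only reserve), and the same ratio for inodes. A self-contained C sketch of the block-side arithmetic using statvfs(3); the function and variable names are illustrative:

    #include <stdio.h>
    #include <sys/statvfs.h>

    /* Print capacity, usage and percent used for one mount point, scaled
       to 1KB blocks and using the same f_blocks + f_bavail - f_bfree
       denominator as the SE code above. */
    static int report_mount(const char *mountp)
    {
        struct statvfs vfs;
        double block_factor, used, pct;

        if (statvfs(mountp, &vfs) == -1)
            return -1;

        block_factor = vfs.f_frsize / 1024.0;
        used         = (double)(vfs.f_blocks - vfs.f_bfree);
        pct          = 100.0 * used /
                       (vfs.f_blocks + vfs.f_bavail - vfs.f_bfree);

        printf("%s: %.0fKB total, %.0fKB used (%.3f%%)\n",
               mountp, block_factor * vfs.f_blocks, block_factor * used, pct);
        return 0;
    }
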
+
#ifdef WATCH_DISK
-handle_disk()
+measure_disk()
{
double mean_disk_busy;
double peak_disk_busy;
+ double total_reads;
+ double total_writes;
+ double total_readk;
+ double total_writek;
+ int previous_count = -1;
int i;
mean_disk_busy = 0.0;
peak_disk_busy = 0.0;
- for (i=0; i < GLOBAL_disk[0].disk_count; i++) {
+ total_reads = 0.0;
+ total_writes = 0.0;
+ total_readk = 0.0;
+ total_writek = 0.0;
+ for (i=0; i<GLOBAL_disk_count; i++) {
+ put_output(sprintf("disk_runp_c%dt%dd%d",
+ GLOBAL_disk[i].info.controller,
+ GLOBAL_disk[i].info.target,
+ GLOBAL_disk[i].info.device),
+ sprintf("%16.5f", GLOBAL_disk[i].run_percent));
+ total_reads += GLOBAL_disk[i].reads;
+ total_writes += GLOBAL_disk[i].writes;
+ total_readk += GLOBAL_disk[i].kreads;
+ total_writek += GLOBAL_disk[i].kwrites;
mean_disk_busy += GLOBAL_disk[i].run_percent;
if (GLOBAL_disk[i].run_percent > peak_disk_busy) {
peak_disk_busy = GLOBAL_disk[i].run_percent;
}
}
- mean_disk_busy = mean_disk_busy/GLOBAL_disk[0].disk_count;
+ if (GLOBAL_disk_count != 0) {
+ mean_disk_busy = mean_disk_busy/GLOBAL_disk_count;
+ }
put_output("disk_peak", sprintf("%9.3f", peak_disk_busy));
put_output("disk_mean", sprintf("%9.3f", mean_disk_busy));
+ put_output("disk_rd/s", sprintf("%9.1f", total_reads));
+ put_output("disk_wr/s", sprintf("%9.1f", total_writes));
+ put_output("disk_rK/s", sprintf("%9.1f", total_readk));
+ put_output("disk_wK/s", sprintf("%9.1f", total_writek));
+
+ // If the number of disks has changed, say due to an add_drv, then print
+ // new headers.
+ if (previous_count != GLOBAL_disk_count) {
+ print_header = 1;
+ previous_count = GLOBAL_disk_count;
+ }
}
#endif
#ifdef WATCH_DNLC
-handle_dnlc()
+measure_dnlc()
{
put_output("dnlc_ref/s", sprintf("%10.3f", tmp_lrdnlc.refrate));
put_output("dnlc_hit%", sprintf("%9.3f", tmp_lrdnlc.hitrate));
@@ -614,7 +1040,7 @@
#endif
#ifdef WATCH_INODE
-handle_inode()
+measure_inode()
{
put_output("inod_ref/s", sprintf("%10.3f", tmp_lrinode.refrate));
put_output("inod_hit%", sprintf("%9.3f", tmp_lrinode.hitrate));
@@ -623,16 +1049,17 @@
#endif
#ifdef WATCH_RAM
-handle_ram()
+measure_ram()
{
put_output("swap_avail", sprintf("%10ld", GLOBAL_pvm[0].swap_avail));
put_output("page_rstim", sprintf("%10d", tmp_lrram.restime));
+ put_output(" freememK", sprintf("%10d", GLOBAL_pvm[0].freemem));
put_output("free_pages", sprintf("%10d", (GLOBAL_pvm[0].freemem*1024)/page_size));
}
#endif
#ifdef WATCH_PAGES
-handle_pages()
+measure_pages()
{
put_output("pp_kernel", sprintf("%9lu", tmp_kstat_pages.pp_kernel));
put_output("pagesfree", sprintf("%9lu", tmp_kstat_pages.pagesfree));
@@ -642,241 +1069,257 @@
}
#endif
-#ifdef WATCH_HTTPD
-/* breakdown access log format */
+#ifdef WATCH_WEB
+// Break down the access log format.
accesslog(string buf) {
- int size_index;
- string word;
- int z;
- int ishead;
-#if WATCH_PROXY || WATCH_SQUID
- double xf;
+ int z;
+ int size_index;
+ int ishead;
+ string word;
+ char first_byte[1];
+
+#if WATCH_PROXY || WATCH_SQUID || WATCH_YAHOO
+ double xf;
#ifdef WATCH_SQUID
- string logtag;
- string request;
+ string logtag;
+ string request;
#endif
+#ifdef WATCH_YAHOO
+ string arg;
+ ulong ptr;
+ ulong tmp;
+ ulong ulong_xf;
+#endif
+#endif
+
+ ishead = 0;
+#ifdef WATCH_YAHOO
+ // Make sure that the input line has at least 32 bytes of data plus a new
+ // line, for a total length of 33.
+ if (strlen(buf) < 33) {
+ return;
+ }
+ word = strtok(buf,"\05");
+#else
+ word = strtok(buf," ");
#endif
- ishead = 0;
- word = strtok(buf," ");
- if (word == nil) {
- return;
- }
+ if (word == nil) {
+ return;
+ }
+
#ifdef WATCH_SQUID
- /* word contains unix time in seconds.milliseconds */
- word = strtok(nil, " "); /* elapsed request time in ms */
- xf = atof(word)/1000.0;
- prxy_squid_xfer_sum += xf;
+ // Word contains unix time in seconds.milliseconds.
+ word = strtok(nil, " "); // Elapsed request time in ms
+ xf = atof(word)/1000.0;
+ www_dwnld_time_sum += xf;
#ifdef DINKY
- printf("time: %s %f total %f\n", word, xf, xfer_sum);
+ printf("time: %s %f total %f\n", word, xf, xfer_sum);
#endif
- word = strtok(nil, " "); /* client IP address */
- logtag = strtok(nil, "/"); /* Log tag */
- word = strtok(nil, " "); /* reply code */
- if (word != nil) {
- if (strstr(word,"304") != nil) {
- httpop_condgets++;
- }
- if (strncmp(word, "4", 1) == 0 || strncmp(word, "5", 1) == 0) {
- httpop_errors++;
- }
- }
- word = strtok(nil, " "); /* size sent to client */
- z = atoi(word);
- request = strtok(nil, " "); /* Request method */
- word = strtok(nil, " "); /* URL */
- if (word != nil) {
- if (strstr(word,"cgi-bin") != nil) {
- httpop_cgi_bins++;
- }
- if (strstr(word, search_url) != nil) {
- httpop_searches++;
- }
- }
- word = strtok(nil, " "); /* optional user ident */
- word = strtok(nil, "/"); /* Hierarchy */
- if (word != nil) {
- if (strstr(word, "DIRECT") == nil) {
- prxy_squid_indirect++;
- }
- }
- word = strtok(nil, " "); /* Hostname */
- word = strtok(nil, " "); /* content-type */
-
- /* process the collected data */
- if (strstr(logtag, "TCP") != nil) {
- squid_client_http++;
- }
- if (strstr(logtag, "UDP") != nil) {
- squid_icp_requests++;
- }
- if (strstr(logtag, "HIT") != nil) {
- prxy_squid_cache_hits++;
- }
- if (strstr(logtag, "MISS") != nil) {
- squid_cache_misses++;
- }
- switch(request) {
- case "GET":
- httpop_gets++;
- break;
- case "POST":
- httpop_posts++;
- break;
- case "HEAD":
- ishead = 1;
- httpop_condgets++;
- break;
- case "ICP_QUERY":
- squid_icp_queries++;
- default: break;
- }
- if (ishead == 0) { /* don't add size if its a HEAD */
- dwnld_totalz += z;
- }
- if (z < 1024) {
- size_index=0; /* under 1K */
- } else {
- if (z < 10240) {
- size_index=1; /* under 10K */
- } else {
- if (z < 102400) {
- size_index=2; /* under 100K */
- } else {
- if (z < 1048576) {
- size_index=3; /* < 1MB */
- } else {
- size_index=4; /* >= 1MB */
- }
- }
- }
- }
- dwnld_size[size_index]++;
- prxy_squid_xfer_by_size[size_index] += xf;
+ word = strtok(nil, " "); // Client IP address
+ logtag = strtok(nil, "/"); // Log tag
+ word = strtok(nil, " "); // Reply code
+ WWW_REPLY_CODE(word)
+ word = strtok(nil, " "); // Size sent to client
+ z = atoi(word);
+ request = strtok(nil, " "); // Request method
+ word = strtok(nil, " "); // URL
+ if (word != nil) {
+ if (word =~ "cgi-bin") {
+ httpop_cgi_bins++;
+ }
+ if (word =~ www_search_url) {
+ httpop_searches++;
+ }
+ }
+ word = strtok(nil, " "); // Optional user ident
+ word = strtok(nil, "/"); // Hierarchy
+ if (word != nil) {
+ if (word =~ "DIRECT") {
+ prxy_squid_indirect++;
+ }
+ }
+ word = strtok(nil, " "); // Hostname
+ word = strtok(nil, " "); // Content-type
+
+ // Process the collected data.
+ if (logtag =~ "TCP") {
+ squid_client_http++;
+ }
+ if (logtag =~ "UDP") {
+ squid_icp_requests++;
+ }
+ if (logtag =~ "HIT") {
+ prxy_squid_cache_hits++;
+ }
+ if (logtag =~ "MISS") {
+ squid_cache_misses++;
+ }
+ WWW_METHOD(request)
+
+ // Do not add size if it's a HEAD.
+ if (ishead == 0) {
+ dwnld_totalz += z;
+ }
+
+ WWW_SIZE_INDEX(z, size_index)
+ www_dwnld_time_by_size[size_index] += xf;
+
+#elif WATCH_YAHOO
+ // Yahoo log format. Fields in square brackets will only appear in the
+ // log file if the data actually exists (ie. you will never see a null
+ // Referrer field). Further, fields labelled here with "(CONFIG)" will
+ // only appear if they are enabled via the YahooLogOptions configuration
+ // directive.
+ //
+ // IP Address (8 hex digits)
+ // Timestamp (time_t as 8 hex digits)
+ // Processing Time (in microseconds, as 8 hex digits)
+ // Bytes Sent (8 hex digits)
+ // URL
+ // [^Er referrer] (CONFIG)
+ // [^Em method] (CONFIG)
+ // [^Es status_code]
+ // ^Ed signature
+ // \n
+
+ // Ignore the IP address and timestamp. Get the processing time, the
+ // number of bytes sent and the URL. For each portion of the line, split
+ // it up into separate pieces.
+ if (sscanf(word, "%8lx%8lx%8x%8x", &tmp, &tmp, &ulong_xf, &z) != 4) {
+ return;
+ }
+
+ xf = ulong_xf/1000000.0;
+ WWW_SIZE_INDEX(z, size_index)
+ www_dwnld_time_sum += xf;
+ www_dwnld_time_by_size[size_index] += xf;
+
+ if (word =~ "cgi-bin") {
+ httpop_cgi_bins++;
+ }
+ if (word =~ www_search_url) {
+ httpop_searches++;
+ }
+
+ for (;;) {
+ word = strtok(nil, "\05");
+ if (word == nil) {
+ break;
+ }
+ first_byte = word;
+ ptr = &word + 1;
+ arg = ((string) ptr);
+ ptr = 0;
+ switch (first_byte[0]) {
+ case 'm':
+ WWW_METHOD(arg)
+ ptr = 1;
+ break;
+ case 's':
+ WWW_REPLY_CODE(arg)
+ break;
+ default:
+ break;
+ }
+ }
+
+ // If no method was seen, then assume it was a GET.
+ if (ptr == 0) {
+ httpop_gets++;
+ }
+
+ // Do not add size if it's a HEAD.
+ if (ishead == 0) {
+ dwnld_totalz += z;
+ }
+
#else /* common or netscape proxy formats */
- strtok(nil, " "); /* - */
- strtok(nil, " "); /* - */
- strtok(nil, " ["); /* date */
- strtok(nil, " "); /* zone] */
- word = strtok(nil, " \""); /* GET or POST */
- switch (word) {
- case "get":
- case "GET":
- httpop_gets++;
- break;
- case "post":
- case "POST":
- httpop_posts++;
- break;
- case "head":
- case "HEAD":
- ishead = 1;
- httpop_condgets++;
- break;
- default:
- break;
- }
- word = strtok(nil, " "); /* URL */
- if (word != nil) {
- if (strstr(word,"cgi-bin") != nil) {
- httpop_cgi_bins++;
- }
- if (strstr(word, search_url) != nil) {
- httpop_searches++;
- }
- }
- // Sometimes HTTP/1.x is not listed in the access log. Skip it
- // if it does exist. Load the error/success code.
- word = strtok(nil, " ");
- if (word != nil) {
- if (strstr(word, "HTTP") != nil || strstr(word, "http") != nil) {
- word = strtok(nil, " ");
- }
- }
- if (word != nil) {
- if (strstr(word,"304") != nil) {
- httpop_condgets++;
- }
- else {
- if (strncmp(word, "4", 1) == 0 || strncmp(word, "5", 1) == 0) {
- httpop_errors++;
- }
- }
- }
- word = strtok(nil, " "); // bytes transferred
- if (word != nil) {
- z = atoi(word);
- if (ishead == 0) { // don't add size if its a HEAD
- dwnld_totalz += z;
- }
- if ((z % 1024) == z) {
- size_index=0; /* under 1K */
- } else {
- if ((z % 10240) == z) {
- size_index=1; /* under 10K */
- } else {
- if ((z % 102400) == z) {
- size_index=2; /* under 100K */
- } else {
- if ((z % 1048576) == z) {
- size_index=3; /* < 1MB */
- } else {
- size_index=4; /* >= 1MB */
- }
- }
- }
- }
- dwnld_size[size_index]++;
- }
+ strtok(nil, " "); // -
+ strtok(nil, " "); // -
+ strtok(nil, " ["); // date
+ strtok(nil, " "); // zone]
+ word = strtok(nil, " \""); // GET or POST
+ WWW_METHOD(word)
+ word = strtok(nil, " "); // URL
+ if (word != nil) {
+ if (word =~ "cgi-bin") {
+ httpop_cgi_bins++;
+ }
+ if (word =~ www_search_url) {
+ httpop_searches++;
+ }
+ }
+ // Sometimes HTTP/1.x is not listed in the access log. Skip it
+ // if it does exist. Load the error/success code.
+ word = strtok(nil, " ");
+ if (word != nil && (word =~ "HTTP" || word =~ "http")) {
+ word = strtok(nil, " ");
+ }
+ WWW_REPLY_CODE(word)
+ word = strtok(nil, " "); // Bytes transferred.
+ if (word != nil) {
+ z = atoi(word);
+ if (ishead == 0) { // Do not add size if it's a HEAD.
+ dwnld_totalz += z;
+ }
+ WWW_SIZE_INDEX(z, size_index)
+ }
#ifdef WATCH_PROXY
- word = strtok(nil, " "); /* status from server */
- word = strtok(nil, " "); /* length from server */
- word = strtok(nil, " "); /* length from client POST */
- word = strtok(nil, " "); /* length POSTed to remote */
- word = strtok(nil, " "); /* client header req */
- word = strtok(nil, " "); /* proxy header resp */
- word = strtok(nil, " "); /* proxy header req */
- word = strtok(nil, " "); /* server header resp */
- word = strtok(nil, " "); /* transfer total secs */
- word = strtok(nil, " "); /* route */
- if (word != nil) { /* - DIRECT PROXY(host.domain:port) SOCKS */
- if (strncmp(word, "PROXY", 5) == 0 ||
- strncmp(word, "SOCKS", 5) == 0) {
- prxy_squid_indirect++;
- }
- }
- word = strtok(nil, " "); /* client finish status */
- word = strtok(nil, " "); /* server finish status */
- word = strtok(nil, " "); /* cache finish status */
- /* - ERROR HOST-NOT-AVAILABLE = error or incomplete op
- WRITTEN REFRESHED CL-MISMATCH(content length mismatch) = cache_writes
- NO-CHECK UP-TO-DATE = cache_hits
- DO-NOT-CACHE NON-CACHEABLE = uncacheable */
- if (word != nil) {
- switch(word) {
- case "WRITTEN":
- case "REFRESHED":
- case "CL-MISMATCH": prxy_cache_writes++;
- break;
- case "NO-CHECK":
- case "UP-TO-DATE": prxy_squid_cache_hits++;
- break;
- case "DO-NOT-CACHE":
- case "NON-CACHEABLE": prxy_uncacheable++;
- break;
- default: break;
- }
- }
- word = strtok(nil, " ["); /* [transfer total time x.xxx */
- if (word != nil) {
- xf = atof(word);
- prxy_squid_xfer_by_size[size_index] += xf;
- prxy_squid_xfer_sum += xf;
- }
+ word = strtok(nil, " "); // status from server
+ word = strtok(nil, " "); // length from server
+ word = strtok(nil, " "); // length from client POST
+ word = strtok(nil, " "); // length POSTed to remote
+ word = strtok(nil, " "); // client header req
+ word = strtok(nil, " "); // proxy header resp
+ word = strtok(nil, " "); // proxy header req
+ word = strtok(nil, " "); // server header resp
+ word = strtok(nil, " "); // transfer total secs
+ word = strtok(nil, " "); // route
+
+ // - DIRECT PROXY(host.domain:port) SOCKS
+ if (word != nil) {
+ if (strncmp(word, "PROXY", 5) == 0 ||
+ strncmp(word, "SOCKS", 5) == 0) {
+ prxy_squid_indirect++;
+ }
+ }
+ word = strtok(nil, " "); // client finish status
+ word = strtok(nil, " "); // server finish status
+ word = strtok(nil, " "); // cache finish status
+ // ERROR HOST-NOT-AVAILABLE = error or incomplete op
+ // WRITTEN REFRESHED CL-MISMATCH(content length mismatch) = cache_writes
+ // NO-CHECK UP-TO-DATE = cache_hits
+ // DO-NOT-CACHE NON-CACHEABLE = uncacheable
+ if (word != nil) {
+ switch(word) {
+ case "WRITTEN":
+ case "REFRESHED":
+ case "CL-MISMATCH":
+ prxy_cache_writes++;
+ break;
+ case "NO-CHECK":
+ case "UP-TO-DATE":
+ prxy_squid_cache_hits++;
+ break;
+ case "DO-NOT-CACHE":
+ case "NON-CACHEABLE":
+ prxy_uncacheable++;
+ break;
+ default:
+ break;
+ }
+ }
+ word = strtok(nil, " ["); // [transfer total time x.xxx
+ if (word != nil) {
+ xf = atof(word);
+ www_dwnld_time_sum += xf;
+ www_dwnld_time_by_size[size_index] += xf;
+ }
#endif
#endif
}
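
In the WATCH_YAHOO branch above, the first 32 characters of every record are four fixed-width hexadecimal fields (client IP, timestamp, processing time in microseconds, bytes sent). A minimal C sketch of the same sscanf-based split; the function and parameter names are illustrative:

    #include <stdio.h>

    /* Returns 0 on success and converts the processing time from
       microseconds to seconds, as accesslog() does. */
    static int parse_yahoo_prefix(const char *line, double *xfer_secs,
                                  unsigned int *bytes_sent)
    {
        unsigned long ip, when;
        unsigned int  usecs;

        if (sscanf(line, "%8lx%8lx%8x%8x", &ip, &when, &usecs, bytes_sent) != 4)
            return -1;
        *xfer_secs = usecs / 1000000.0;
        return 0;
    }
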
-measure_httpd(long sleep_till)
+measure_web(long sleep_till)
{
double lastops = 0.0;
char buf[BUFSIZ];
@@ -895,19 +1338,21 @@
for (i=0; i<5; i++) {
dwnld_size[i] = 0;
-#if WATCH_PROXY || WATCH_SQUID
- prxy_squid_xfer_by_size[i] = 0.0;
+#if WATCH_PROXY || WATCH_SQUID || WATCH_YAHOO
+ www_dwnld_time_by_size[i] = 0.0;
#endif
}
dwnld_totalz = 0;
+#if WATCH_PROXY || WATCH_SQUID || WATCH_YAHOO
+ www_dwnld_time_sum = 0.0;
+#endif
#if WATCH_PROXY || WATCH_SQUID
- prxy_squid_xfer_sum = 0.0;
- prxy_squid_indirect = 0;
- prxy_squid_cache_hits = 0;
+ prxy_squid_indirect = 0;
+ prxy_squid_cache_hits = 0;
#ifdef WATCH_PROXY
- prxy_cache_writes = 0;
- prxy_uncacheable = 0;
+ prxy_cache_writes = 0;
+ prxy_uncacheable = 0;
#else
squid_cache_misses = 0;
squid_icp_requests = 0;
@@ -916,14 +1361,20 @@
#endif
#endif
- if (log_name != nil) {
- while (time(0) < sleep_till) {
+ if (www_log_filename != nil) {
+ now = time(0);
+ while (now < sleep_till) {
+#ifdef WATCH_CPU
+ sleep_till_and_count_new_proceses(now + 5);
+#else
sleep(5);
- if (log_file != 0) {
- while (fgets(buf, BUFSIZ, log_file) != nil) {
+#endif
+ now = time(0);
+ if (www_fd != 0) {
+ while (fgets(buf, BUFSIZ, www_fd) != nil) {
httpops += 1.0;
- if (log_gatelen > 0) {
- if (strncmp(buf, log_gateway, log_gatelen) == 0) {
+ if (www_gatelen > 0) {
+ if (strncmp(buf, www_gateway, www_gatelen) == 0) {
gateops += 1.0;
}
}
@@ -932,19 +1383,19 @@
}
/* see if the file has been switched or truncated */
- stat(log_name, log_stat);
- if (log_ino != log_stat[0].st_ino || log_size > log_stat[0].st_size) {
- if (log_file != 0) {
- fclose(log_file); /* close the old log */
+ stat(www_log_filename, www_stat);
+ if (www_ino != www_stat[0].st_ino || www_size > www_stat[0].st_size) {
+ if (www_fd != 0) {
+ fclose(www_fd); /* close the old log */
}
/* log file has changed, open the new one */
- log_file = fopen(log_name, "r");
- if (log_file != 0) {
- log_ino = log_stat[0].st_ino;
- while(fgets(buf, BUFSIZ, log_file) != nil) {
+ www_fd = fopen(www_log_filename, "r");
+ if (www_fd != 0) {
+ www_ino = www_stat[0].st_ino;
+ while(fgets(buf, BUFSIZ, www_fd) != nil) {
httpops += 1.0;
- if (log_gatelen > 0) {
- if (strncmp(buf, log_gateway, log_gatelen) == 0) {
+ if (www_gatelen > 0) {
+ if (strncmp(buf, www_gateway, www_gatelen) == 0) {
gateops += 1.0;
}
}
@@ -953,30 +1404,27 @@
}
}
- log5_now = gethrtime();
- log5_interval = (log5_now - log5_then) * 0.000000001;
- log5_then = log5_now;
- dtmp = (httpops - lastops)/log5_interval;
+ www5_now = gethrtime();
+ www5_interval = (www5_now - www5_then) * 0.000000001;
+ www5_then = www5_now;
+ dtmp = (httpops - lastops)/www5_interval;
if (dtmp > httpops5) {
httpops5 = dtmp;
}
lastops = httpops;
// Remember size for next time.
- log_size = log_stat[0].st_size;
+ www_size = www_stat[0].st_size;
}
}
else {
- now = time(0);
- while (now < sleep_till) {
- sleep(sleep_till - now);
- now = time(0);
- }
+ sleep_till_and_count_new_proceses(sleep_till);
+ www5_now = gethrtime();
}
- log_now = gethrtime();
- log_interval = (log_now - log_then) * 0.000000001;
- log_then = log_now;
+ www_now = www5_now;
+ www_interval = (www_now - www_then) * 0.000000001;
+ www_then = www_now;
// Use dtmp to get percentages.
if (httpops == 0.0) {
@@ -986,13 +1434,13 @@
dtmp = 100.0 / httpops;
}
-#if WATCH_PROXY || WATCH_SQUID
- for (i=0; i<5; ++i) {
+#if WATCH_PROXY || WATCH_SQUID || WATCH_YAHOO
+ for (i=0; i<5; i++) {
if (dwnld_size[i] == 0) {
- prxy_squid_xfer_by_size[i] = 0.0;
+ www_dwnld_time_by_size[i] = 0.0;
}
else {
- prxy_squid_xfer_by_size[i] = prxy_squid_xfer_by_size[i]/dwnld_size[i];
+ www_dwnld_time_by_size[i] = www_dwnld_time_by_size[i]/dwnld_size[i];
}
}
#endif
@@ -1000,7 +1448,7 @@
int count_proc(string name)
{
- int count;
+ int count;
prpsinfo_t p;
count = 0;
@@ -1014,37 +1462,44 @@
put_httpd()
{
- put_output("#httpds", sprintf("%7d", count_proc("httpd")));
- put_output("httpop/s", sprintf("%8.2f", httpops/log_interval));
- put_output("http/p5s", sprintf("%8.2f", httpops5));
- put_output("cndget/s", sprintf("%8.2f", httpop_condgets/log_interval));
- put_output("search/s", sprintf("%8.3f", httpop_searches/log_interval));
- put_output(" cgi/s", sprintf("%8.3f", httpop_cgi_bins/log_interval));
- put_output(" htErr/s", sprintf("%8.3f", httpop_errors/log_interval));
- put_output(" httpb/s", sprintf("%8.0f", dwnld_totalz/log_interval));
- put_output(" %to1KB", sprintf("%8.2f", dtmp*dwnld_size[0]));
- put_output(" %to10KB", sprintf("%8.2f", dtmp*dwnld_size[1]));
- put_output("%to100KB", sprintf("%8.2f", dtmp*dwnld_size[2]));
- put_output(" %to1MB", sprintf("%8.2f", dtmp*dwnld_size[3]));
- put_output("%over1MB", sprintf("%8.2f", dtmp*dwnld_size[4]));
- put_output(log_gateway, sprintf("%8.2f", gateops/log_interval));
+ put_output("#httpds", sprintf("%7d", count_proc(www_server_proc_name)));
+ put_output("httpop/s", sprintf("%8.2f", httpops/www_interval));
+ put_output("http/p5s", sprintf("%8.2f", httpops5));
+ put_output("cndget/s", sprintf("%8.2f", httpop_condgets/www_interval));
+ put_output("search/s", sprintf("%8.3f", httpop_searches/www_interval));
+ put_output(" cgi/s", sprintf("%8.3f", httpop_cgi_bins/www_interval));
+ put_output(" htErr/s", sprintf("%8.3f", httpop_errors/www_interval));
+ put_output(" httpb/s", sprintf("%8.0f", dwnld_totalz/www_interval));
+ put_output(" %to1KB", sprintf("%8.2f", dtmp*dwnld_size[0]));
+ put_output(" %to10KB", sprintf("%8.2f", dtmp*dwnld_size[1]));
+ put_output("%to100KB", sprintf("%8.2f", dtmp*dwnld_size[2]));
+ put_output(" %to1MB", sprintf("%8.2f", dtmp*dwnld_size[3]));
+ put_output("%over1MB", sprintf("%8.2f", dtmp*dwnld_size[4]));
+ put_output(www_gateway, sprintf("%8.2f", gateops/www_interval));
#if WATCH_PROXY || WATCH_SQUID
- put_output(" %indir", sprintf("%8.2f", dtmp * prxy_squid_indirect));
- put_output("%cch_hit", sprintf("%8.2f", dtmp * prxy_squid_cache_hits));
+ put_output(" %indir", sprintf("%8.2f", dtmp * prxy_squid_indirect));
+ put_output("%cch_hit", sprintf("%8.2f", dtmp * prxy_squid_cache_hits));
#ifdef WATCH_PROXY
- put_output("%cch_wrt", sprintf("%8.2f", dtmp * prxy_cache_writes));
- put_output("%cch_unc", sprintf("%8.2f", dtmp * prxy_uncacheable));
+ put_output("%cch_wrt", sprintf("%8.2f", dtmp * prxy_cache_writes));
+ put_output("%cch_unc", sprintf("%8.2f", dtmp * prxy_uncacheable));
#else
- put_output("%cch_mis", sprintf("%8.2f", dtmp * squid_cache_misses));
- put_output("%cch_req", sprintf("%8.2f", dtmp * squid_icp_requests));
- put_output("%cch_qry", sprintf("%8.2f", dtmp * squid_icp_queries));
-#endif
- put_output(" xfr_t", sprintf("%8.2f", 0.01 * dtmp * prxy_squid_xfer_sum));
- put_output(" xfr1_t", sprintf("%8.2f", prxy_squid_xfer_by_size[0]));
- put_output(" xfr10_t", sprintf("%8.2f", prxy_squid_xfer_by_size[1]));
- put_output("xfr100_t", sprintf("%8.2f", prxy_squid_xfer_by_size[2]));
- put_output(" xfr1M_t", sprintf("%8.2f", prxy_squid_xfer_by_size[3]));
- put_output("xfro1M_t", sprintf("%8.2f", prxy_squid_xfer_by_size[4]));
+ put_output("%cch_mis", sprintf("%8.2f", dtmp * squid_cache_misses));
+ put_output("%cch_req", sprintf("%8.2f", dtmp * squid_icp_requests));
+ put_output("%cch_qry", sprintf("%8.2f", dtmp * squid_icp_queries));
+#endif
+ put_output(" xfr_t", sprintf("%8.2f", 0.01 * dtmp * www_dwnld_time_sum));
+ put_output(" xfr1_t", sprintf("%8.2f", www_dwnld_time_by_size[0]));
+ put_output(" xfr10_t", sprintf("%8.2f", www_dwnld_time_by_size[1]));
+ put_output("xfr100_t", sprintf("%8.2f", www_dwnld_time_by_size[2]));
+ put_output(" xfr1M_t", sprintf("%8.2f", www_dwnld_time_by_size[3]));
+ put_output("xfro1M_t", sprintf("%8.2f", www_dwnld_time_by_size[4]));
+#elif WATCH_YAHOO
+ put_output(" wprc_t", sprintf("%9.5f", 0.01 * dtmp * www_dwnld_time_sum));
+ put_output(" wprc1_t", sprintf("%9.5f", www_dwnld_time_by_size[0]));
+ put_output(" wprc10_t", sprintf("%9.5f", www_dwnld_time_by_size[1]));
+ put_output("wprc100_t", sprintf("%9.5f", www_dwnld_time_by_size[2]));
+ put_output(" wprc1M_t", sprintf("%9.5f", www_dwnld_time_by_size[3]));
+ put_output("wprco1M_t", sprintf("%9.5f", www_dwnld_time_by_size[4]));
#endif
}
#endif
Modified: trunk/orca/orcallator/start_orcallator.sh.in
==============================================================================
--- trunk/orca/orcallator/start_orcallator.sh.in (original)
+++ trunk/orca/orcallator/start_orcallator.sh.in Sat Jul 13 19:22:25 2002
@@ -7,14 +7,34 @@
exec_prefix=@exec_prefix@
libdir=@libdir@
AWK=@AWK@
+COMPRESSOR="@COMPRESSOR@"
CUT=@CUT@
EXPR=@EXPR@
UNAME=@UNAME@
ORCALLATOR_DIR=@ORCALLATOR_DIR@
SE=@SE@
-WATCH_HTTPD="@WATCH_HTTPD@"
+WATCH_WEB="@WATCH_WEB@"
WEB_LOG=@WEB_LOG@
+# Check if the SE executable was found upon configure.
+if test -z "$SE"; then
+  echo "When you configured Orca, the se executable was not found.  If you"
+ echo "do not have the SE toolkit installed on your system, then follow"
+ echo "the steps in section 8 of INSTALL."
+ echo ""
+ echo "Once you have the SE toolkit installed on your system, then either"
+ echo "rerun configure so that it finds se, or edit start_orcallator.sh"
+ echo "and define SE to the location of se."
+ exit 1
+fi
+
+if test ! -x "$SE"; then
+ echo "The SE executable at"
+ echo " $SE"
+ echo "does not exist or is not executable. Please correct this problem."
+ exit 1
+fi
+
# Get the hostname without the fully qualified part; that is, trim off
# anything past the first `.'.
uname=`$UNAME -n | $CUT -d. -f1`
@@ -23,7 +43,7 @@
OUTDIR=$ORCALLATOR_DIR/$uname
# Export the environmental variables.
-export OUTDIR WEB_LOG
+export COMPRESSOR OUTDIR WEB_LOG
# Check if orcallator is already running.
pids=`/usr/ucb/ps auxww | $AWK '/orcallator.se/ && !/awk/ {print $2}'`
@@ -53,7 +73,7 @@
# Now start the logging.
echo "Starting logging"
-$SE $LE_PATCH -DWATCH_OS $WATCH_HTTPD $libdir/orcallator.se &
+$SE $LE_PATCH -DWATCH_OS $WATCH_WEB $libdir/orcallator.se &
# Write the PID of orcallator to a file to make killing easier.
pid=$!
Modified: trunk/orca/config/PerlHead1.in
==============================================================================
--- trunk/orca/config/PerlHead1.in (original)
+++ trunk/orca/config/PerlHead1.in Sat Jul 13 19:22:25 2002
@@ -1,4 +1,4 @@
#!@PERL@ -w # -*- perl -*-
-require 5.005;
+require 5.004_01;
Modified: trunk/orca/config/PerlHead2.in
==============================================================================
--- trunk/orca/config/PerlHead2.in (original)
+++ trunk/orca/config/PerlHead2.in Sat Jul 13 19:22:25 2002
@@ -5,5 +5,5 @@
& eval 'exec perl -w -S $0 $argv:q'
if 0;
-require 5.005;
+require 5.004_01;
Added: trunk/orca/config/aclocal.m4
==============================================================================
--- trunk/orca/config/aclocal.m4 (original)
+++ trunk/orca/config/aclocal.m4 Sat Jul 13 19:22:25 2002
@@ -0,0 +1,53 @@
+dnl config/aclocal.m4 generated automatically by aclocal 1.4
+
+dnl Copyright (C) 1994, 1995-8, 1999 Free Software Foundation, Inc.
+dnl This file is free software; the Free Software Foundation
+dnl gives unlimited permission to copy and/or distribute it,
+dnl with or without modifications, as long as this notice is preserved.
+
+dnl This program is distributed in the hope that it will be useful,
+dnl but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+dnl even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+dnl PARTICULAR PURPOSE.
+
+dnl Check if the requested module is installed in this Perl installation.
+dnl Do not cache this result, since the user may easily install the
+dnl modules and rerun configure.  We do not want to remember if
+dnl the module is not installed.
+dnl BORP_PERL_MODULE(DEFINE, PATH_TO_PERL, MODULE_NAME, MODULE_VERSION,
+dnl                  [ACTION_IF_FOUND, [ACTION_IF_NOT_FOUND]])
+AC_DEFUN(BORP_PERL_MODULE, [
+ AC_MSG_CHECKING([if Perl module $3 version $4 is installed])
+ if $2 ./config/check_for_perl_mod $3 $4; then
+ $1=yes
+ ifelse([$5], , , [$5])
+ else
+ $1=no
+ ifelse([$6], , , [$6])
+ fi
+ AC_MSG_RESULT([$]$1)
+])
+
+dnl Check if the whole path to Perl can be placed on the #! line
+dnl of a shell script. Some systems have length restrictions
+dnl so some paths to programs may be too long.
+dnl BORP_PERL_RUN(PATH_TO_SHELL [, ACTION-IF-WORKS [, ACTION-IF-NOT]])
+AC_DEFUN(BORP_PERL_RUN, [
+ AC_MSG_CHECKING([if '$1' will run Perl scripts])
+ rm -f conftest.BZ
+ cat > conftest.BZ <<EOF
+#!$1
+
+exit 0
+EOF
+ chmod +x conftest.BZ
+ if ./conftest.BZ 2>/dev/null; then
+ ifelse([$2], , , [$2])
+ AC_MSG_RESULT(yes)
+ else
+ ifelse([$3], , , [$3])
+ AC_MSG_RESULT(no)
+ fi
+ rm -f conftest.BZ
+])
+
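
The BORP_PERL_MODULE macro above delegates the actual version test to a
config/check_for_perl_mod helper script that is not shown here.  A minimal
sketch of such a checker, assuming only that it takes a module name and a
minimum version on the command line and reports the result through its exit
status (the plain numeric version comparison is an oversimplification), might
look like:

    #!/usr/bin/perl -w
    # Hypothetical sketch of config/check_for_perl_mod: exit 0 if MODULE is
    # installed and its $VERSION is at least VERSION, exit 1 otherwise.
    use strict;

    my ($module, $version) = @ARGV;
    die "usage: check_for_perl_mod MODULE VERSION\n" unless defined $version;

    # Can the module be loaded at all?
    eval "require $module";
    exit 1 if $@;

    # Compare the module's reported $VERSION against the requested minimum.
    my $installed = eval "\$${module}::VERSION";
    exit((defined $installed && $installed >= $version) ? 0 : 1);
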
Modified: trunk/orca/TODO
==============================================================================
--- trunk/orca/TODO (original)
+++ trunk/orca/TODO Sat Jul 13 19:22:25 2002
@@ -1,22 +1,399 @@
-Other:
+This is a pretty comprehensive to-do list for Orca and the related data
+gathering tools. Any comments and additions to this list are welcome.
+
+To motivate the discussion of this to-do list, let me give some background
+on our setup.  The GeoCities site has over 100 hosts.  I've been running
+orcallator.se on some hosts since September 1998, and those hosts have
+stored over 300 source text files.  Currently I have 34472 files using
+7.3 gigabytes of storage.  I have 9 different orcallator.cfg files for
+different classes of machines.
+
+* Orca: Load arbitrarily formatted source text files.
+
+  (If this is implemented, then the problem of Orca complaining about
+  source text files that do not contain the fields orcallator.cfg is
+  looking for will disappear.)
+
+ Orca can only handle source text files that have data from one
+ source, such as a host, in a single file. It cannot currently
+ handle source data like this:
+
+ # Web_server time hits/s bytes/s
+ www1 933189273 1304 10324
+ www2 933189273 2545 40322
+
+ I plan on having Orca allow something like this in the configuration
+ file for each group. To read the data above, something like this
+ would be defined. Here the ChangeSubGroup and StoreMeasurement
+ would be macros or subroutines defined elsewhere for Orca users
+ to use.
+
+ group web_servers {
+ upon_open
+ # Do nothing special.
+
+ upon_new_data
+ while (<$fh>) {
+ next if /^#/;
+ my @line = split;
+ next unless (@line == 4);
+ my ($www, $time, $hits_s, $bytes_s) = @line;
+ ChangeSubGroup($www);
+ StoreMeasurement('Hits Per Second', $time, $hits_s);
+ StoreMeasurement('Bytes Per Second', $time, $bytes_s);
+ }
+
+ upon_close
+ # Do nothing special.
+ }
+
+ For the standard orcallator.se output, something like this would
+ be used:
+
+ group orcallator {
+ find_files /usr/local/var/orca/orcallator/(.*)/orcallator-\d+-\d+\d+
+
+ upon_open
+ # Look for the first # line describing the data.
+    while (<$fh>) {
+ last if /^#/;
+ }
+    @ColumnNames = split;
+ # Do some searching for the column with the Unix time in it.
+ my $time_column = ....;
+ splice(@ColumnNames, $time_column, 1);
+
+ upon_new_data
+ # Load the new data.
+      my @line = split(' ', $line);
+ my @line = split;
+ next unless @line == @ColumnNames + 1;
+ my ($time) = splice(@line, $time_column, 1);
+ #
+ StoreMeasurements($time, \@ColumnNames, \@line);
+ }
+
+ }
+
+ The code for each upon_* would be Perl code designed explicitly
+ for the type of source text. This would allow the arbitrary
+ reading of text files, leaving the specifics to the user of
+ Orca.
+
+ This work would also include caching away the type of measurements
+ that each source data file provides. Currently Orca reads the
+ header of each source text file for the names of the columns.
+ With the number of source text files I have, this takes a long
+ time.
+
+* OS independent data gathering tools:
+
+ Many people have been asking for Orca for operating systems other
+ than Solaris, since orcallator.se only runs on Solaris hosts.
+
+  I've given this a little thought, and one good solution is to use
+  other publicly available tools that gather host information.
+  The one that came to mind is top (ftp://ftp.groupsys.com/pub/top).
+ Looking at the configure script for top, it runs on the following
+ OSes:
+
+ 386bsd For a 386BSD system
+ aix32 POWER and POWER2 running AIX 3.2.5.0
+ aix41 PowerPC running AIX 4.1.2.0
+ aux3 a Mac running A/UX version 3.x
+ bsd386 For a BSD/386 system
+ bsd43 any generic 4.3BSD system
+ bsd44 For a 4.4BSD system
+ bsd44a For a pre-release 4.4BSD system
+ bsdos2 For a BSD/OS 2.X system (based on the 4.4BSD Lite system)
+ convex any C2XX running Convex OS 11.X.
+ dcosx For Pyramid DC/OSX
+ decosf1 DEC Alpha AXP running OSF/1 or Digital Unix 4.0.
+ dgux for DG AViiON with DG/UX 5.4+
+ dynix any Sequent Running Dynix 3.0.x
+ dynix32 any Sequent Running Dynix 3.2.x
+ freebsd20 For a FreeBSD-2.0 (4.4BSD) system
+ ftx For FTX based System V Release 4
+ hpux10 any hp9000 running hpux version 10.x
+ hpux7 any hp9000 running hpux version 7 or earlier
+ hpux8 any hp9000 running hpux version 8 (may work with 9)
+ hpux9 any hp9000 running hpux version 9
+ irix5 any uniprocessor, 32 bit SGI machine running IRIX 5.3
+ irix62 any uniprocessor, SGI machine running IRIX 6.2
+ linux Linux 1.2.x, 1.3.x, using the /proc filesystem
+ mtxinu any VAX Running Mt. Xinu MORE/bsd
+ ncr3000 For NCR 3000 series systems Release 2.00.02 and above -
+ netbsd08 For a NetBSD system
+ netbsd10 For a NetBSD-1.0 (4.4BSD) system
+ netbsd132 For a NetBSD-1.3.2 (4.4BSD) system
+ next32 any m68k or intel NEXTSTEP v3.x system
+ next40 any hppa or sparc NEXTSTEP v3.3 system
+ osmp41a any Solbourne running OS/MP 4.1A
+ sco SCO UNIX
+ sco5 SCO UNIX OpenServer5
+ sunos4 any Sun running SunOS version 4.x
+ sunos4mp any multi-processor Sun running SunOS versions 4.1.2 or later
+ sunos5 Any Sun running SunOS 5.x (Solaris 2.x)
+ svr4 Intel based System V Release 4
+ svr42 For Intel based System V Release 4.2 (DESTINY)
+ ultrix4 any DEC running ULTRIX V4.2 or later
+ umax Encore Multimax running any release of UMAX 4.3
+ utek Tektronix 43xx running UTek 4.1
+
+ If somebody were to write a tie into top's source code that would
+ generate output every X minutes to a text file or even RRD files,
+ then Orca could put it together into a nice package.
+
+  In line with this is di
+  (ftp://ftp.pz.pirmasens.de/pub/unix/utilities/), a freely
+  available disk usage program that could be used to generate disk
+  usage plots loaded by Orca.
+
+* orcallator.se: Dump directly to RRD files.
+
+ A separate RRD file would be created for each measurement.
+ I do not want all the data stored in a single RRD, since people
+ commonly add or remove hardware from the system, which will cause
+ more or less data to be stored. Also, this currently would not
+ work with RRDtool, since you cannot dynamically add more RRAs
+ to a RRD file. Saving each measurement in a separate RRD file
+ removes this issue.
+
+ Pros:
+ 1) Disk space savings. For an old host using over 70
+ megabytes of storage for text output, the RRD files
+ consume 3.5 megabytes of storage.
+ 2) Orca processing time. Orca spends a large amount of
+ kernel and CPU time in finding files and getting
+ the column headers from these files. By storing
+ the data in RRD files, this is no longer a problem.
+ Also, Orca itself would not need to move the data
+ from text to RRD form, speeding up the process of
+ generating the final plots.
+
+ Cons:
+ 1) Potential slowdown in updating the data files.
+ It is easier to write a single text line using fprintf
+ than using rrd_update. What is the impact on a single
+ orcallator.se process?
+ 2) RRDtool format changes and upgrading the data files.
+ Text files do not change, but if RRDtool does change,
+ then the data files will need to be upgraded somehow.
+ 3) Loss of data over time. Due to the consolidation
+ function of RRD, older data will not be as precise
+ in case it needs to be examined.
+ 4) You cannot grep or simply parse the text files for
+ particular data for ad-hoc studies.
+ 5) The RRD creation parameters would be set by
+ orcallator.se and not by Orca, making modifications
+ harder.
+
+ Question: Do the pros outweigh the cons?
+
+ Work to do: Get RRDtool to build a librrd.so that orcallator.se
+ would attach to. The RRDs.so that gets built in the perl-shared
+ directory has RRDs.o included in it with references to Perl
+ variables, so this shared library cannot be loaded by anybody
+ else. Two things can be done. One is to modify the perl-shared
+ Makefile.PL to build a librrd.so without RRDs.so. The other is
+ to somehow make librrd.so with libtool. Either libtool can be
+ integrated into RRDtool as a whole, or probably simpler would
+ be to add another directory to RRDtool and use libtool in there
+ to make the shared library. The first libtool solution would
+ allow RRDtool to make shared libraries on almost any host without
+  requiring that gcc be installed, since currently the Makefiles
+ look for gcc to use the -fPIC flags.
+
+* Orca: Potentially use Cricket's configuration ConfigTree Module.
+
+ Given more complex Orca installations where many different Orca
+ configuration files are used, maintaining them will start to be
+  complicated.  For example, at Yahoo!/GeoCities I have 9 different
+  configurations to split up the hosts for our site.  I found that
+  the number of hosts and the number of data files require this split
+  for reasonable generation of the resulting HTML and PNG files.
+
+ It looks like using ConfigTree would allow Orca to use the same
+ inheritance that Cricket uses. I don't know enough about the
+ Cricket config tree setup to know if it would work well with Orca.
+
+ Work to do: Review the ConfigTree code.
+
+* Orca: Allow different group sources in the same plot.
+
+ Currently Orca only allows data sources from one group. Expand
+  the code to list the group in each data line.  Initially, however,
+  only data from one group would be allowed in one data statement.
+
+* Orca: Put the last update time for each host in an HTML file somewhere.
+
+  This could be done simply by updating a file that gets included
+ by the real HTML file. This way the main HTML files do not
+ have to get rewritten all the time. On large installations,
+ writing the HTML files is lengthy.
+
+* Orca: Turn off HTML creation via command line option.
+
+ Add a command line option to turn off HTML file creation.
+
+* Orca: Update the HTML files if new data is found.
+
+ Currently Orca will only update the HTML files if new source
+ files are found, but not if new data in existing files is found.
+ Change this.
+
+* orcallator.se: Put HTTP proxy and caching statistics into orcallator.cfg.
+
+ Since orcallator.se measures HTTP proxy and caching statistics,
+ update orcallator.cfg.in to display these data sets.
+
+* Orca:
+
+  Do what it takes to remove the duplicated Ethernet port listings in
+  orcallator.cfg.in.  They seem redundant, but are not entirely so,
+  since different interfaces have different maximum data transfer
+  rates.
+
+* Orca:
+
+ Add some error checking code for the maximum number of available
+ colors so undefined errors do not arise.
+
+* Other:
Mention the AIX tool nmon.
-Orca:
- Come up with a better error scheme than using warn() for some
- errors and the email warn for others.
- Do something better if the number of columns changes in a single
- orcallator file.
- Have a scheme were at any point of time a data file may add or
- change the number of columns of data and column names.
- Lock file.
- Arbitrary date reading.
- Use Cricket's configuration ConfigTree?????
- More configuration file defaults.
- Better date loading support.
- Make plots from multiple files sets: delete source files_key and put
- it into data.
- Update HTML files if a new file is found with a new group.
- Update orcallator.cfg.in for caching and proxy stuff.
-orcallator.se:
- Better documentation.
+
+
+
+Some notes from Paul Company <paul.company at plpt.com>:
+
+I'd create one graph for each and autoscale. That way you have system
+processes, httpd processes and a combination. All your bases are covered.
+
+Presenting data in a useful, meaningful way is an artform & is very
+difficult. What makes it an artform is the definitions of useful and
+meaningful are subjective. General rules of thumb:
+ + Know your audience.
+ + Know what you're measuring and why (definitions/goals).
+ + Know what the measurements mean.
+ aka., know what is good/bad/acceptable (limits/thresholds).
+ + Know how you're measuring.
+ aka., how reliable is your measurement?
+
+Graphs are just one way of presenting data.
+
+I definitely don't claim to be an expert of any kind in this area,
+but here are my preferences.
+
+Most resources have a max and min limit (this defines your range). I like
+to see the max value at the top of the y-axis and the min at the bottom.
+
+Most resources have a usage pattern (this defines your scale). I like
+to see autoscaling graphs which dynamically modify the top & bottom to
+show the points of activity. This is useful when the range is huge and
+the activity is localized to a small band within the huge range.
+
+For example,
+
+We have a Fractional T1 (384Kbps with 50% CIR) at my company and I use
+mrtg to monitor it. Because the range is small I use a fixed min & max,
+and a fixed linear scale of 1.
+ (1) x-axis is time
+ y-axis is Kbps w/ min=0
+ max=384kb
+ scale=1Kbps (linear)
+
+Assume a web site gets anywhere from 0 hits/second to 10Million hits
+per second. I would want multiple graphs depending on usage pattern:
+ (1) x-axis is time
+ y-axis is Kbps w/ min=0
+ max=10Million
+ scale=1Hit/s (linear)
+
+ (2) x-axis is time
+ y-axis is Kbps w/ min=0
+ max=10Million
+ scale=1-1000Hit/s (logarithmic)
+
+ (3) x-axis is time
+ y-axis is Kbps w/ min={min value of lowest usage, usually zero}
+ max={max value of highest usage}
+ scale=auto (multiple graphs with
+ appropriate range/scale)
+
+Bottom line is it would be nice if one could modify the various attributes
+of the graphs (range, scale, labels, title, ...) interactively. I realize
+the orca graphs are generated as fixed PNG files.  Just dreaming.
+
+Here are the things I'd like modified in orca, for my (audience)
+specific use.
+ + Definitions for all Data Sets which includes what is good and bad.
+ For example, is a 15-20% collision rate bad?
+ What is a Disk Busy Measure and is 95 a bad number?
+ Also, suggestions for what to do if things are bad.
+ + Finer resolution (more graphs) on each Data Set.
+ For example, a graph for each disk, when you click on that graph
+ you get a graph for each:
+ Bytes read
+ Bytes written
+ KBytes read
+ KBytes written
+ Average # of transactions waiting
+ Average # of transactions serviced
+ Reads issued
+ Writes issued
+ Average service time (milliseconds)
+ % of time spent waiting for service
+ % of time spent busy
+   + The ability to set thresholds (like virtual_adrian.se, zoom.se
+ or pure_test.se) and have those thresholds graphed as color
+ changes (red, amber, green etc.)
+ + System & Httpd Processes
+ Have 3 separate graphs as mentioned above.
+ + CPU Usage
+ Have multiprocessor support - separate graphs for each processor,
+ in addition to the combined graph.
+ + Packets Per Second
+ I'd like to know the definition of packet and maybe the
+ ave,min,max size. And maybe the packet types (IP, TCP,
+ UDP, ICMP).
+ + Errors Per Second
+ Possibly a graph per error type.
+ + Nocanput Rate
+ I'd like to know the definition of nocanput.
+ What is the unit on the y-axis. What does 3m mean?
+ + Collisions
+ Deferred Packet Rate
+ TCP Bits Per Second
+ TCP Segments Per Second
+ TCP Retransmission & Duplicate Received Percentage
+ TCP New Connection Rate
+ TCP Number Open Connections
+ TCP Reset Rate
+ TCP Attempt Fail Rate
+ TCP Listen Drop Rate
+ Should all be per interface AND cumulative.
+ + Page Residence Time
+ What's a good/bad/acceptable number?
+ How do you read this graph and relate it to pageouts
+ or swap thrashing.
+ Maybe we should plot the sr field of vmstat. p.329
+ + Page Usage
+ Unless you know how memory subsystems work,
+ this graph is hard to read. The only obvious
+ thing this graph tells you is if the Free List
+ is too small. The Free List can be fine, but you
+ can still have performance problems!
+ The Other & System Total labels are useless.
+ Detecting a swap problem (thrashing) would be more useful!
+ + Pages Locked & IO
+ This maps directly to the kernel page usage above.
+ It is redundant and therefore useless.
+ + Bits Per Second: <interface>
+ Doesn't work for all OS versions and/or NICs.
+ + Web Server Hit Rate, Web Server File Size, Web Server Data
+ Transfer Rate,
+ Web Server HTTP Error Rate
+ All don't seem to work!
+ Probably my fault, I'll take a closer look.
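
To make the "orcallator.se: Dump directly to RRD files" item above more
concrete, here is a minimal sketch, using the RRDs Perl module that Orca
already requires, of what creating and updating one RRD file per measurement
might look like.  The step, DS, and RRA parameters and the file name are
illustrative assumptions only, not values orcallator.se or Orca would
necessarily use:

    use strict;
    use RRDs;

    # Create one RRD file for a single measurement (illustrative parameters).
    sub create_measurement_rrd {
      my ($file, $start) = @_;
      RRDs::create($file,
                   '--start', $start - 1,
                   '--step',  300,               # assume 5 minute samples
                   'DS:value:GAUGE:600:U:U',     # one data source per file
                   'RRA:AVERAGE:0.5:1:600');     # ~50 hours of 5 minute averages
      my $err = RRDs::error;
      die "RRDs::create failed: $err\n" if $err;
    }

    # Append a single sample to that measurement's RRD file.
    sub update_measurement_rrd {
      my ($file, $time, $value) = @_;
      RRDs::update($file, "$time:$value");
      my $err = RRDs::error;
      die "RRDs::update failed: $err\n" if $err;
    }

    my $now = time();
    create_measurement_rrd('httpop_per_second.rrd', $now);
    update_measurement_rrd('httpop_per_second.rrd', $now, 42);

Whether the pros outweigh the cons is still the open question raised in that
item; the sketch only shows that the one-file-per-measurement layout is
straightforward on the RRD side.
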
Modified: trunk/orca/INSTALL
==============================================================================
--- trunk/orca/INSTALL (original)
+++ trunk/orca/INSTALL Sat Jul 13 19:22:25 2002
@@ -1,191 +1,158 @@
Outline:
-1) Install Perl 5.005_0[23].
+ 1) Install Perl 5.004_01 or later.
-2) Decide where Orca's binaries, RRD, HTML, and orcallator directories
- will reside. Make sure performance concerns are handled.
+ 2) Decide where Orca's binaries, RRD, HTML, and orcallator directories
+ will reside. Make sure performance concerns are handled.
-3) Configure Orca.
+ 3) Determine which Perl modules need compiling and installing.
+ Optionally download newer versions of these modules.
-4) Make, test and install necessary Perl modules.
- b) Install Digest::MD5 version 2.00 or greater.
- a) Install Math::IntervalSearch version 1.00 or greater.
- c) Install RRDs version 0.99.29 or greater.
- d) Install Storable 0.603 or greater.
+ 4) Configure Orca.
-5) Make Orca.
+ 5) Make Orca and any necessary Perl modules.
-6) Install Orca.
+ 6) Test if the Perl modules properly compiled.
-7) [Optional] Install orcallator.
- a) If required, rename all percollator, percol and perc filenames to
- orcallator.
- b) Install the SE toolkit.
- c) Apply a patch to the SE 3.0 toolkit.
- d) Examine Orca/orcallator programs.
- e) Run start_orcallator on all systems.
- f) Edit orcallator.cfg.
- g) Run Orca.
+ 7) Doing an upgrade from Orca 0.23 or older? Follow these steps.
+ 8) Install any necessary Perl modules.
+ 9) Install Orca.
+10) Set the Unix process file descriptor limit to at least 256.
-1) Install Perl 5.005_0[23].
+11) [Solaris Only and Optional] Install orcallator.
+ a) Install the SE toolkit.
+ b) Apply a patch to the SE 3.0 toolkit if necessary.
+ c) Examine Orca/orcallator programs.
+ d) Run start_orcallator on all systems.
+ e) Edit orcallator.cfg.
- I have used only version 5.005_0[23] of Perl with Orca. Because Orca
- makes very heavy use of references, it may or may not work with older
- versions of Perl. I welcome feedback if Orca works with older Perls.
+12) Run Orca.
- This step is too large to go into here. The bottom line is to follow
- the instructions at
- http://language.perl.com/info/software.html
-2) Decide where Orca's binaries, RRD, HTML, and orcallator directories
- will reside. Make sure performance concerns are handled.
- First choose the location where Orca will be installed. By default,
- Orca will install into the following structure:
+ 1) Install Perl 5.004_01 or later.
- $prefix Prefix, set with --prefix=
- $prefix/bin Binaries, set with --bindir=
- $prefix/lib Libraries, set with --libdir=
- $prefix/man Manual pages, set with --mandir=
- $prefix/var/orca/rrd RRD directory, set with --with-rrd-dir
- $prefix/var/orca/orcallator Orcallator directory, set with --with-orcallator-dir
+ This step is too large to go into here. The bottom line is to follow
+ the instructions at
- By default $prefix is set to /usr/local. The -- arguments shown
- above should be given to the configure script described below which
- configures Orca. If you want to change the installation location of
- Orca, say into /opt/orca, you would do so by passing --prefix=/opt/orca
- to the configure script.
+ http://language.perl.com/info/software.html
- Because Orca is extremely IO intensive, I recommend that the host
- that locally mounts the web server content be the same machine that
- runs Orca. In addition, the RRD data files that Orca uses also
- require a good amount of IO. The machine running Orca should always
- have the $prefix/var/rrd directory locally mounted. For performance
- concerns it is more important this directory be locally stored than
- HTML directory where the resulting HTML and GIF files are written.
+ 2) Decide where Orca's binaries, RRD, HTML, and orcallator directories
+ will reside. Make sure performance concerns are handled.
- If you are going to use the orcallator Orca addon to monitor your
- Sun Solaris systems, then you will in addition need to decide where
- to have orcallator store its data files. By default, these data
- files are written to once every 5 minutes, so IO is not an issue.
- The issue here is that orcallator needs to run as root and all of
- the orcallator output files from all your hosts need to be written
- into the same NFS shared directory that Orca can read. It is not too
- important that the directory that orcallator writes into be mounted
- locally on the machine that Orca will run on, since Orca will only
- read each file every five minutes.
+ First choose the location where Orca will be installed. By default,
+ Orca will install into the following structure:
- If you are running orcallator on a system running a web, proxy, or
- Squid server, you can have orcallator watch the logs generated by
- these programs. In this case, note the location of the log file for
- the configure script.
+ $prefix Prefix, set with --prefix=
+ $prefix/bin Binaries, set with --bindir=
+ $prefix/lib Libraries, set with --libdir=
+ $prefix/man Manual pages, set with --mandir=
+ $prefix/var/orca/rrd RRD directory, set with --with-rrd-dir
+ $prefix/var/orca/orcallator Orcallator directory, set with --with-orcallator-dir
-3) Configure Orca.
+ The HTML output directory is not set by default and must be specified
+ by the Orca administrator.
- Now that you have decided where the RRD, HTML, and optionally the
- orcallator data files and the web server access logs, are located,
- run the configure script with the following arguments:
+ By default $prefix is set to /usr/local. The -- arguments shown
+ above should be given to the configure script described below
+ which configures Orca. If you want to change the installation
+ location of Orca, say into /opt/orca, you would do so by passing
+ --prefix=/opt/orca to the configure script.
- % ./configure --prefix=ORCA_PREFIX_DIRECTORY \
- --with-rrd-dir=RRD_DIR_LOCATION \
- --with-html-dir=HTML_DIR_LOCATION \
- --with-orcallator-dir=ORCALLATOR_DIR_LOCATION \
- --with-TYPE-log=LOG_LOCATION
+ Because Orca is extremely IO intensive, I recommend the following
+ architecture. Choose one host that can locally mount all the RRD data
+ files and the directory containing the HTML and image files that are
+ viewed by Orca users. If these two locations must be on separate
+ hosts and one directory must be NFS mounted to the Orca host, then
+      I recommend that the RRD data files be local instead of the HTML and
+ image files, since the RRD files are much more read/write intensive.
- If you choose nothing else, the --with-html-dir must always be used.
+ If you are going to use the orcallator Orca addon to monitor your
+ Sun Solaris systems, then you will in addition need to decide where
+ to have orcallator store its data files. By default, these data
+ files are written to once every 5 minutes, so IO is not an issue.
+ The issue here is that orcallator needs to run as root and all of
+ the orcallator output files from all your hosts need to be written
+ into the same NFS shared directory that Orca can read. It is not too
+ important that the directory that orcallator writes into be mounted
+ locally on the machine that Orca will run on, since Orca will only
+ read each file every five minutes.
- If you use a web, proxy, or Squid server, you can have orcallator
- gather statistics from the log file. Use this table to decide which
- configure option to use:
+ If you are running orcallator on a system running a web, proxy,
+ or Squid server, you can have orcallator watch the logs generated
+ by these programs. In this case, note the location of the log file
+ for the configure script.
- Log Type Configure Option
- -----------------------------------------------------------------
- NCSA/Common Log Format --with-ncsa-log=FILE
- Common Log Format with Proxy Information --with-proxy-log=FILE
- Squid Log Format --with-squid-log=FILE
+ 3) Determine which Perl modules need compiling and installing.
+ Optionally download newer versions of these modules.
- Configure will let you use only one of these --with-TYPE-log options.
+ Orca requires the following Perl modules at the specified versions:
- The configure script will find where your version of Perl and some
- other assorted programs are located. It will also determine if you
- have the necessary Perl modules to run Orca. If it does not find
- the required modules, the modules that are included with the Orca
- distribution will be built.
+ Name Required Version Included With Orca
+ -----------------------------------------------------------------------
+ Data::Dumper 2.101 or greater 2.101
+ Digest::MD5 2.00 or greater 2.09
+ Math::IntervalSearch 1.05 or greater 1.05
+ RRDs 1.0.7.2 or greater 1.0.7.2
+ Storable 0.6.3 or greater 0.6.5
-4) Make, test and install necessary Perl modules.
+ All five of these modules are included with the Orca distribution
+ in the packages directory. When you configure Orca in step 4),
+ configure will determine if you need any of these modules compiled
+ and installed. configure will then modify the packages/Makefile
+ file to only build those modules that need to be installed.
- Orca requires the following modules at the specified version number:
+ All of the modules except for Math::IntervalSearch require a compiler
+ and generate shared libraries by default.
- Name Required Version Included With Orca
- -----------------------------------------------------------------------
- Digest::MD5 version 2.00 or greater 2.07
- Math::IntervalSearch 1.00 or greater 1.02
- RRDs version 0.99.29 or greater 0.99.31
- Storable 0.603 or greater 0.603
+ If you wish to download and install modules that have been updated
+ since this Orca package has been assembled, please use the following
+ information.
- All four of these modules are included with the Orca distribution
- in the packages directory. When you configured Orca in step 3),
- configure should have determined if you need any of these modules
- installed in the version of Perl that configure found. configure will
- then modify the packages/Makefile file to only build those modules
- that need to be installed.
+ Data::Dumper
- All of the modules except for Math::IntervalSearch require a compiler
- and generate shared libraries by default.
+ http://www.perl.com/CPAN/authors/id/GSAR/Data-Dumper-2.101.tar.gz
- To make these modules run the following command:
-
- % make modules [ To optimize: make modules CFLAGS=-O ]
-
- To test if the modules are working properly, run the following command:
-
- % make test_modules
-
- To automatically install these modules into Perl run the following
- command:
-
- % make install_modules
-
- If you want newer version of these modules, get them from the following
- locations:
+ % gunzip -c Data-Dumper-2.101.tar.gz | tar xvf -
+ % cd Data-Dumper-2.101
+ % perl Makefile.PL
+ % make
+ % make test
+ % make install
- Math::IntervalSearch
+ Digest::MD5
- ftp://ftp.gps.caltech.edu/pub/blair/Perl/Math-Interpolate-1.02.tar.gz
- http://www.perl.com/CPAN/authors/id/B/BZ/BZAJAC/Math-Interpolate-1.02.tar.gz
+ http://www.perl.com/CPAN/authors/id/GAAS/Digest-MD5-2.09.tar.gz
- % gunzip -c Math-Interpolate-1.02.tar.gz | tar xvf -
- % cd Math-Interpolate-1.02
+ % gunzip -c Digest-MD5-2.09.tar.gz | tar xvf -
+ % cd Digest-MD5-2.09
% perl Makefile.PL
% make
% make test
% make install
- Digest::MD5
+ Math::IntervalSearch
- http://www.perl.com/CPAN/authors/id/GAAS/Digest-MD5-2.07.tar.gz
+ ftp://ftp.gps.caltech.edu/pub/blair/Perl/Math-Interpolate-1.05.tar.gz
+ http://www.perl.com/CPAN/authors/id/B/BZ/BZAJAC/Math-Interpolate-1.05.tar.gz
- % gunzip -c Digest-MD5-2.07.tar.gz | tar xvf -
- % cd Digest-MD5-2.07
+ % gunzip -c Math-Interpolate-1.05.tar.gz | tar xvf -
+ % cd Math-Interpolate-1.05
% perl Makefile.PL
% make
% make test
% make install
- RRDs
+ RRDs
http://ee-staff.ethz.ch/~oetiker/webtools/rrdtool/pub/
RRDs is the Perl module that comes with RRDtool, a package written
- by Tobias Oetiker. RRDtool contains a copy of the gd1.2 library
- and is installed automatically into the RRDtool shared library that
- is used by the RRDs Perl module. While gd1.3 exists, according
- to its README file it generates larger GIFs. I see no reason to
- link RRDtool with gd1.3.
+ by Tobias Oetiker.
% gunzip -c rrdtool-?.??.??.tar.gz | tar xvf -
% cd rrdtool-?.??.??
@@ -198,92 +165,232 @@
For large installations, I recommend that RRDs be compiled with
optimization turned on.
-5) Make Orca.
+ Storable
- Build Orca by running the following command:
+ http://www.perl.com/CPAN/authors/id/RAM/Storable-0.6.5.tar.gz
- % make
+ % gunzip -c Storable-0.6.5.tar.gz | tar xvf -
+ % cd Storable-0.6.5
+ % perl Makefile.PL
+ % make
+ % make test
+ % make install
-6) Install Orca.
+ 4) Configure Orca.
- Between version 0.20 and 0.21 of Orca, a major name change occurred in
- all of the installed and generated files. Any filenames containing
- percollator, percol, and perc had the name orcallator substituted in
- place. Percollator.se has been renamed to orcallator.se and its output
- files are now named orcallator. The default percollator.cfg has been
- renamed to orcallator.cfg and the version of orcallator.cfg included
- here now looks for data filenames of the form orcallator-1999-05-08
- and percol-1999-05-8. If you are running an Orca installation 0.20 or
- older and want to rename all of the files and directories in your Orca
- installation to the new scheme, then kill any running percollator.se's
- and Orca processes. Then run
+      Now that you have decided where the RRD, HTML, and optionally the
+      orcallator data files and the web server access logs are located,
+      run the configure script with the following arguments:
+
+ % ./configure --prefix=ORCA_PREFIX_DIRECTORY \
+ --with-rrd-dir=RRD_DIR_LOCATION \
+ --with-html-dir=HTML_DIR_LOCATION \
+ --with-orcallator-dir=ORCALLATOR_DIR_LOCATION \
+ --with-TYPE-log=LOG_LOCATION
+
+ If you choose nothing else, the --with-html-dir must always be used,
+ otherwise configure will fail.
+
+ If you use a web, proxy, or Squid server, you can have orcallator
+ gather statistics from the log file. Use this table to decide which
+ configure option to use:
+
+ Log Type Configure Option
+ -----------------------------------------------------------------
+ NCSA/Common Log Format --with-ncsa-log=FILE
+ Common Log Format with Proxy Information --with-proxy-log=FILE
+ Squid Log Format --with-squid-log=FILE
+
+ Configure will let you use only one of these --with-TYPE-log options.
+
+ The configure script will find where your version of Perl and some
+ other assorted programs are located. It will also determine if you
+ have the necessary Perl modules to run Orca. If it does not find
+ the required modules, the modules that are included with the Orca
+ distribution will be built.
+
+ Configure will also determine if you run one of the operating
+ systems where a shared librrd.so library will be built and installed
+ in $libdir.
+
+ 5) Make Orca and any necessary Perl modules.
+
+ To make Orca and these Perl modules run the following command:
+
+ % make [ To optimize: make CFLAGS=-O or CFLAGS=-O3 ]
+
+ 6) Test if the Perl modules properly compiled.
+
+ To check if the Perl modules were properly compiled run the following
+ command:
+
+ % make test_modules
+
+ 7) Doing an upgrade from Orca 0.23 or older? Follow these steps.
+
+ Due to various changes to Orca between releases, many of the RRD,
+ HTML and image filenames that Orca creates have changed names.
+ Two separate issues exist.
+
+ The first is that the naming scheme for all generated HTML and image
+ (either PNG or GIF) files have changed. Unless you want to leave
+ files with old names around wasting disk space, I recommend you cd
+ into your HTML directories and delete all files there.
+
+ The second issue is that the RRD data files have also changed names
+ and unless you want to reload all of your source data files and
+ waste more disk space on unused RRD files, I suggest that you run
+ following command:
+      the following command:
+ % make upgrade
+
+      This will look through all of the directories that Orca will
+ install into and use (namely the $prefix, $exec_prefix, $bindir,
+ $libdir, $ORCALLATOR_DIR, and $RRD_DIR directories) and perform any
+ necessary file renaming.
+
+      If you have some new directories that are not included in the above
+      list of directories that make upgrade will cover, you can run the
+      src/upgrade_installation program with a list of directories to parse
+      and rename.  If you want to see what upgrade_installation will rename
+      without actually doing the rename, give it the -n option before any
+      directory names.
+
+ Here is a description of the various differences between versions.
+
+ 0.23 -> 0.24
+
+ The following substitutions are now done to create any RRD, HTML and
+ image files.
+
+ orcallator -> o
+ orca -> o
+ _percent -> _pct
+ _number -> _num
+ _times -> _X
+ # -> _num_
+ * -> _X_
+
+ 0.20 -> 0.21
+
+ Between version 0.20 and 0.21 of Orca, a major name change occurred
+ in all of the installed and generated files. Any filenames
+ containing percollator, percol, and perc had the name orcallator
+ substituted in place. Filenames containing the word percent are
+ properly protected and will not be renamed to contain the word
+ orcallatorent. Percollator.se has been renamed to
+ orcallator.se and its output files are now named orcallator.
+ The default percollator.cfg has been renamed to orcallator.cfg
+ and the version of orcallator.cfg included here now looks for data
+ filenames of the form orcallator-1999-05-08 and percol-1999-05-8.
+ If you are running an Orca installation 0.20 or older and want to
+ rename all of the files and directories in your Orca installation
+ to the new scheme, then kill any running percollator.se's before
+      installing, and then run the upgrade commands described above.
- % make migrate
+ 8) Install any necessary Perl modules.
- This will look through the all of the directories that Orca will
- install into and use (namely the $prefix, $exec_prefix, $bindir,
- $libdir, $ORCALLATOR_DIR, and $RRD_DIR directories) and perform any
- necessary file renaming. Filenamess containing the word percent will
- not have be modified to contain the word orcallatorent.
+ To automatically install these modules into Perl run the following
+ command:
+
+ % make install_modules
+
+ 9) Install Orca.
+
+ Run the following command to install Orca:
+
+ % make install
+
+ This may also install librrd.so in your $libdir.
+
+10) Set the Unix process file descriptor limit to at least 256.
+
+ Orca is designed to run with a minimum of 256 available file
+ descriptors. If under Unix your processes have a file descriptor
+      limit less than 256, then Orca may fail.
- Now run the following command to install Orca:
+ To check your default file descriptor limit, on many Unix systems
+ you can run
- % make install
+ % ulimit -n
-7) [Optional] Install orcallator.
- a) Install the SE toolkit.
+ If this number is less than 256, then you'll need to increase your
+ limit. This can be accomplished using the limit or ulimit command
+ in your shell, depending upon which shell you use. See man (1)
+ ulimit or man (1) limit.
- Perform the installation instructions as listed on the web page
+ If you are running a C shell equivalent, such as csh or tcsh, then
+ you can run
+
+ % limit descriptors 256
+
+ If you have a Bourne shell equivalent, such as sh, ksh, bash, you
+ can run
+
+ % ulimit -n 256
+
+ This command should either be placed in a .login, .profile, .cshrc
+ file or run just before Orca is run.
+
+11) [Solaris Only and Optional] Install orcallator.
+ a) Install the SE toolkit.
+
+ Perform the installation instructions as listed on the web page
http://www.sun.com/sun-on-net/performance/se3/
- If you are running 2.6 or greater, then download SE 3.1 or greater.
- Otherwise you will need SE 3.0.
+ If you are running 2.6 or greater, then download SE 3.1 or greater.
+ Otherwise you will need SE 3.0.
- b) Apply a patch to the SE 3.0 toolkit. If you are running any other
- release of SE, then do not install the patch.
+ b) Apply a patch to the SE 3.0 toolkit. If you are running any other
+ release of SE, then do not install the patch.
- By default the SE toolkit will install into /opt/RICHPse.
- Run this command:
+ By default the SE toolkit will install into /opt/RICHPse.
+ Run this command:
- % cd /opt/RICHPse
- % patch -s < THIS_DIR/patches/p_netstat_class.se.diff
+        % cd /opt/RICHPse
+        % patch -s < THIS_DIR/patches/p_netstat_class.se.diff
- c) Examine Orca/orcallator programs.
+ c) Examine Orca/orcallator programs.
- Orca's installation scripts also installs several programs and
- configuration files necessary to have Orca monitor many different
- statistics of your Sun Solaris systems.
+      Orca's installation scripts also install several programs and
+ configuration files necessary to have Orca monitor many different
+ statistics of your Sun Solaris systems.
- The following tools are installed in the $prefix/bin directory:
+ The following tools are installed in the $prefix/bin directory:
start_orcallator - start orcallator on a system
- stop_precol - stop orcallator on a system
+ stop_orcallator - stop orcallator on a system
restart_orcallator - restart orcallator on a system
orcallator_column - print selected columns from orcallator output
orcallator_running - run to see if any orcallators are not running
- d) Run start_orcallator on all systems.
+ d) Run start_orcallator on all systems.
+
+ Log in as root on all the systems you want to watch and run:
+
+ % $prefix/bin/start_orcallator
- Log in as root on all the systems you want to watch and run:
+ Orcallator will not generate an output data file until the first
+      update interval, which will be between 2.5 and 7.5 minutes after
+ orcallator is started.
- % $prefix/bin/start_orcallator
+ e) Edit orcallator.cfg.
- Orcallator will not generate an output data file until the first
- update interval, which will be between 2.5 to 7.5 minutes after
- orcallator is started.
+ You need to edit the installed orcallator.cfg file and remove
+ all unneeded references. In particular, you'll want to change
+ warn_email, which is the email address that receives emails when
+      orcallator-generated files are out of date, which may signify an
+      orcallator program that has died and is no longer gathering data.
- e) Edit orcallator.cfg.
+12) Run Orca.
- You need to edit the installed orcallator.cfg file and remove
- all unneeded references. In particular, you'll want to change
- warn_email, which is the email address that receives emails when
- orcallator generated files are out of date, which may signify a
- orcallator program that has died and is no longer gathering data.
+ Log into the system that will run Orca and run the command:
- f) Run Orca.
+ % cd $prefix
+ % ./bin/orca -v CONFIG_FILE
- Log into the system that will run Orca and run the command:
+ If you are using orcallator.se, then this command will be
- % cd $prefix
- % ./bin/orca -v lib/orcallator.cfg
+ % cd $prefix
+ % ./bin/orca -v lib/orcallator.cfg
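
The 0.23 -> 0.24 substitution table in step 7 above amounts to a handful of
regular expression replacements.  As a rough sketch of the mapping only (the
order of the first two substitutions matters, the example file name is made
up, and the real upgrade program in src/ additionally walks the directories
and performs the renames, which this does not):

    #!/usr/bin/perl -w
    # Hypothetical sketch: apply the 0.23 -> 0.24 name substitutions to one name.
    use strict;

    sub new_name {
      my $name = shift;
      $name =~ s/orcallator/o/g;   # must run before the plain orca substitution
      $name =~ s/orca/o/g;
      $name =~ s/_percent/_pct/g;
      $name =~ s/_number/_num/g;
      $name =~ s/_times/_X/g;
      $name =~ s/#/_num_/g;
      $name =~ s/\*/_X_/g;
      return $name;
    }

    # Made-up example name; prints "o_disk_runp_pct_c0t0d0".
    print new_name('orcallator_disk_runp_percent_c0t0d0'), "\n";
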
Modified: trunk/orca/lib/Makefile.in
==============================================================================
--- trunk/orca/lib/Makefile.in (original)
+++ trunk/orca/lib/Makefile.in Sat Jul 13 19:22:25 2002
@@ -6,11 +6,7 @@
MKDIR = @MKDIR@
INSTALL = @INSTALL@
-all: orcallator.cfg orca.gif.hex rrdtool.gif.hex
-
-orcallator.cfg: orcallator.cfg.in
- (cd ..; ./config.status)
- $(MAKE)
+all: orca.gif.hex rrdtool.gif.hex
# Create a hex file representation of orca.gif that can be stored inside
# orca.pl.
@@ -23,13 +19,12 @@
perl -e 'while (sysread(STDIN, $$b, 35)){print unpack("h*", $$b),"\n"}' < $< > $@
install: all
- $(MKDIR) $(libdir)
- -cp -p $(libdir)/orcallator.cfg $(libdir)/orcallator.cfg.`date +%Y-%m-%d-%H:%M:%S`
- $(INSTALL) -m 0644 orcallator.cfg $(libdir)
- $(INSTALL) -m 0644 orcallator.se $(libdir)
clean:
- $(RM) orcallator.cfg
distclean: clean
$(RM) Makefile
+
+Makefile: Makefile.in
+ cd .. && CONFIG_FILES=lib/Makefile ./config.status
+ $(MAKE)
Modified: trunk/orca/configure.in
==============================================================================
--- trunk/orca/configure.in (original)
+++ trunk/orca/configure.in Sat Jul 13 19:22:25 2002
@@ -1,7 +1,7 @@
# This file is an input file used by the GNU "autoconf" program to
# generate the file "configure", which is run during Borp installation
# to configure the system for the local environment.
-AC_INIT(src/orca.pl)
+AC_INIT(src/orca.pl.in)
AC_CONFIG_AUX_DIR(config)
AC_CANONICAL_SYSTEM
@@ -11,11 +11,16 @@
AC_SUBST(CONFIGURE_COMMAND_LINE)
# Define the directories containing packages that Orca makes use of here.
-# The path packages gets added where necessary.
-DIGEST_MD5_DIR=Digest-MD5-2.07
-MATH_INTERPOLATE_DIR=Math-Interpolate-1.02
-RRDTOOL_DIR=rrdtool-0.99.31
-STORABLE_DIR=Storable-0.6 at 3
+# The packages directory name, in which these packages are distributed
+# with Orca, gets added where necessary.
+COMPRESS_ZLIB_DIR=Compress-Zlib-1.05
+DATA_DUMPER_DIR=Data-Dumper-2.101
+DIGEST_MD5_DIR=Digest-MD5-2.09
+MATH_INTERPOLATE_DIR=Math-Interpolate-1.05
+RRDTOOL_DIR=rrdtool-1.0.7.2
+STORABLE_DIR=Storable-0.6.5
+AC_SUBST(COMPRESS_ZLIB_DIR)
+AC_SUBST(DATA_DUMPER_DIR)
AC_SUBST(DIGEST_MD5_DIR)
AC_SUBST(MATH_INTERPOLATE_DIR)
AC_SUBST(RRDTOOL_DIR)
@@ -81,7 +86,7 @@
)`
AC_SUBST(ORCALLATOR_DIR)
-WATCH_HTTPD=
+WATCH_WEB=
NCSA_LOG=
AC_ARG_WITH(ncsa-log,
[ --with-ncsa-log=FILE location of the NCSA style web server access log],
@@ -92,7 +97,7 @@
;;
*) WEB_LOG="$withval"
NCSA_LOG=yes
- WATCH_HTTPD="-DWATCH_HTTPD"
+ WATCH_WEB="-DWATCH_WEB"
;;
esac
]
@@ -111,7 +116,7 @@
;;
*) WEB_LOG="$withval"
PROXY_LOG=yes
- WATCH_HTTPD="-DWATCH_HTTPD -DWATCH_PROXY"
+ WATCH_WEB="-DWATCH_WEB -DWATCH_PROXY"
;;
esac
]
@@ -131,13 +136,12 @@
AC_MSG_ERROR([*** You must supply an argument to the --with-squid-log option.])
;;
*) WEB_LOG="$withval"
- WATCH_HTTPD="-DWATCH_HTTPD -DWATCH_SQUID"
+ WATCH_WEB="-DWATCH_WEB -DWATCH_SQUID"
;;
esac
]
)
-
-AC_SUBST(WATCH_HTTPD)
+AC_SUBST(WATCH_WEB)
AC_SUBST(WEB_LOG)
# To get a default CFLAGS for this build, check for a C compiler. This
@@ -146,14 +150,16 @@
AC_PROG_MAKE_SET
AC_ARG_PROGRAM
-AC_PATH_PROGS(AWK,mawk gawk nawk awk)
+AC_PATH_PROG(BZIP2, bzip2)
+AC_PATH_PROG(BUNZIP2, bunzip2)
+AC_PATH_PROG(COMPRESS, compress)
AC_PATH_PROG(CUT, cut, cut)
AC_PATH_PROG(EXPR, expr, expr)
-AC_PATH_PROG(SE, se,,/opt/RICHPse/bin:$PATH)
-AC_PATH_PROG(UNAME, uname, uname)
+AC_PATH_PROG(GZIP, gzip)
+AC_PATH_PROG(GUNZIP, gunzip)
+AC_PATH_PROGS(AWK, mawk gawk nawk awk)
# Include the file that defines BORP_PERL_RUN.
-sinclude(config/acinclude.m4)
AC_PATH_PROG(PERL, perl, NOT_FOUND)
if test "x$PERL" = "xNOT_FOUND"; then
AC_MSG_ERROR([*** Perl not found. Please install Perl. See INSTALL how to do this.])
@@ -163,46 +169,128 @@
PERL_HEAD="../config/$PERL_HEAD"
AC_SUBST(PERL_HEAD)
-# Check for necessary Perl modules.
+AC_PATH_PROG(UNAME, uname, uname)
+AC_PATH_PROG(UNCOMPRESS, uncompress)
+
+# Determine the correct flags to compress files depending upon the compress
+# programs available.
+COMPRESSOR=
+UNCOMPRESSOR_PIPE=
+if test -n "$BZIP2" -a -n "$BUNZIP2"; then
+ COMPRESSOR="$BZIP2 -9"
+ UNCOMPRESSOR_PIPE="$BUNZIP2 -c"
+elif test -n "$GZIP" -a -n "$GUNZIP"; then
+ COMPRESSOR="$GZIP -9"
+ UNCOMPRESSOR_PIPE="$GUNZIP -c"
+elif test -n "$COMPRESS" -a -n "$UNCOMPRESS"; then
+ COMPRESSOR="$COMPRESS"
+ UNCOMPRESSOR_PIPE="$UNCOMPRESS -c"
+fi
+AC_SUBST(COMPRESSOR)
+AC_SUBST(UNCOMPRESSOR_PIPE)
+
+# Now we check for those portions of Orca that should be built and set
+# up for installation. The first step is to check for operating system
+# specific modules. For Solaris hosts, orcallator.se is built and
+# installed. This requires the additional building of a librrd.so
+# installed in libdir. Then we check for the proper Perl modules.
+AC_MSG_CHECKING([for solaris host for orcallator install])
+case "$target" in
+ *-solaris*)
+ BUILD_ORCALLATOR=yes
+ ORCALLATOR_SUBDIR=orcallator
+ # Add --enable-shared to the configure options for RRDtool if it is
+ # not already declared.
+ expr "$CONFIGURE_COMMAND_LINE" : "--enable-shared" >/dev/null 2>&1 || CONFIGURE_COMMAND_LINE="$CONFIGURE_COMMAND_LINE --enable-shared"
+
+ INSTALL_LIB_RRDTOOL=install_lib_rrdtool
+ MAKE_RRDTOOL=make_rrdtool
+ TEST_RRDTOOL=test_rrdtool
+ INSTALL_PERL_RRDTOOL=
+ CLEAN_RRDTOOL=clean_rrdtool
+ DISTCLEAN_RRDTOOL=distclean_rrdtool
+ ;;
+ *)
+ INSTALL_LIB_RRDTOOL=
+ BUILD_ORCALLATOR=no
+ ORCALLATOR_SUBDIR=
+ ;;
+esac
+AC_SUBST(ORCALLATOR_SUBDIR)
+AC_SUBST(INSTALL_LIB_RRDTOOL)
+
+AC_MSG_RESULT($BUILD_ORCALLATOR)
+if test "$BUILD_ORCALLATOR" = "yes"; then
+ AC_PATH_PROG(SE, se,,$PATH:/opt/RICHPse/bin)
+fi
+
+dnl BORP_PERL_MODULE(borp_cv_perl_compress_zlib, $PERL, Compress::Zlib, 1.05)
+if test "$borp_cv_perl_compress_zlib" = no; then
+ MAKE_COMPRESS_ZLIB=make_compress_zlib
+ TEST_COMPRESS_ZLIB=test_compress_zlib
+ INSTALL_PERL_COMPRESS_ZLIB=install_perl_compress_zlib
+ CLEAN_COMPRESS_ZLIB=clean_compress_zlib
+ DISTCLEAN_COMPRESS_ZLIB=distclean_compress_zlib
+fi
+AC_SUBST(MAKE_COMPRESS_ZLIB)
+AC_SUBST(TEST_COMPRESS_ZLIB)
+AC_SUBST(INSTALL_PERL_COMPRESS_ZLIB)
+AC_SUBST(CLEAN_COMPRESS_ZLIB)
+AC_SUBST(DISTCLEAN_COMPRESS_ZLIB)
+
+BORP_PERL_MODULE(borp_cv_perl_data_dumper, $PERL, Data::Dumper, 2.101)
+if test "$borp_cv_perl_data_dumper" = no; then
+ MAKE_DATA_DUMPER=make_data_dumper
+ TEST_DATA_DUMPER=test_data_dumper
+ INSTALL_PERL_DATA_DUMPER=install_perl_data_dumper
+ CLEAN_DATA_DUMPER=clean_data_dumper
+ DISTCLEAN_DATA_DUMPER=distclean_data_dumper
+fi
+AC_SUBST(MAKE_DATA_DUMPER)
+AC_SUBST(TEST_DATA_DUMPER)
+AC_SUBST(INSTALL_PERL_DATA_DUMPER)
+AC_SUBST(CLEAN_DATA_DUMPER)
+AC_SUBST(DISTCLEAN_DATA_DUMPER)
+
BORP_PERL_MODULE(borp_cv_perl_digest_md5, $PERL, Digest::MD5, 2.00)
if test "$borp_cv_perl_digest_md5" = no; then
MAKE_DIGEST_MD5=make_digest_md5
TEST_DIGEST_MD5=test_digest_md5
- INSTALL_DIGEST_MD5=install_digest_md5
+ INSTALL_PERL_DIGEST_MD5=install_perl_digest_md5
CLEAN_DIGEST_MD5=clean_digest_md5
DISTCLEAN_DIGEST_MD5=distclean_digest_md5
fi
AC_SUBST(MAKE_DIGEST_MD5)
AC_SUBST(TEST_DIGEST_MD5)
-AC_SUBST(INSTALL_DIGEST_MD5)
+AC_SUBST(INSTALL_PERL_DIGEST_MD5)
AC_SUBST(CLEAN_DIGEST_MD5)
AC_SUBST(DISTCLEAN_DIGEST_MD5)
-BORP_PERL_MODULE(borp_cv_perl_math_interpolate, $PERL, Math::Interpolate, 1.00)
+BORP_PERL_MODULE(borp_cv_perl_math_interpolate, $PERL, Math::Interpolate, 1.04)
if test "$borp_cv_perl_math_interpolate" = no; then
MAKE_MATH_INTERPOLATE=make_math_interpolate
TEST_MATH_INTERPOLATE=test_math_interpolate
- INSTALL_MATH_INTERPOLATE=install_math_interpolate
+ INSTALL_PERL_MATH_INTERPOLATE=install_perl_math_interpolate
CLEAN_MATH_INTERPOLATE=clean_math_interpolate
DISTCLEAN_MATH_INTERPOLATE=distclean_math_interpolate
fi
AC_SUBST(MAKE_MATH_INTERPOLATE)
AC_SUBST(TEST_MATH_INTERPOLATE)
-AC_SUBST(INSTALL_MATH_INTERPOLATE)
+AC_SUBST(INSTALL_PERL_MATH_INTERPOLATE)
AC_SUBST(CLEAN_MATH_INTERPOLATE)
AC_SUBST(DISTCLEAN_MATH_INTERPOLATE)
-BORP_PERL_MODULE(borp_cv_perl_rdds, $PERL, RRDs, 0.99029)
+BORP_PERL_MODULE(borp_cv_perl_rdds, $PERL, RRDs, 1.000072)
if test "$borp_cv_perl_rdds" = no; then
MAKE_RRDTOOL=make_rrdtool
TEST_RRDTOOL=test_rrdtool
- INSTALL_RRDTOOL=install_rrdtool
+ INSTALL_PERL_RRDTOOL=install_perl_rrdtool
CLEAN_RRDTOOL=clean_rrdtool
DISTCLEAN_RRDTOOL=distclean_rrdtool
fi
AC_SUBST(MAKE_RRDTOOL)
AC_SUBST(TEST_RRDTOOL)
-AC_SUBST(INSTALL_RRDTOOL)
+AC_SUBST(INSTALL_PERL_RRDTOOL)
AC_SUBST(CLEAN_RRDTOOL)
AC_SUBST(DISTCLEAN_RRDTOOL)
@@ -210,17 +298,16 @@
if test "$borp_cv_perl_storable" = no; then
MAKE_STORABLE=make_storable
TEST_STORABLE=test_storable
- INSTALL_STORABLE=install_storable
+ INSTALL_PERL_STORABLE=install_perl_storable
CLEAN_STORABLE=clean_storable
DISTCLEAN_STORABLE=distclean_storable
fi
AC_SUBST(MAKE_STORABLE)
AC_SUBST(TEST_STORABLE)
-AC_SUBST(INSTALL_STORABLE)
+AC_SUBST(INSTALL_PERL_STORABLE)
AC_SUBST(CLEAN_STORABLE)
AC_SUBST(DISTCLEAN_STORABLE)
-
# Define the INSTALL and MKDIR variables to point to the scripts in
# the config directory.
INSTALL="../config/install-sh -c"
@@ -230,29 +317,34 @@
#--------------------------------------------------------------------
# Generate the Makefiles and shell scripts with the
-# variable substitution.
+# variable substitutions.
#--------------------------------------------------------------------
+if test "$BUILD_ORCALLATOR" = "yes"; then
+ ORCALLATOR_OUTPUT="orcallator/orcallator.cfg
+ orcallator/orcallator_running.pl
+ orcallator/restart_orcallator.sh
+ orcallator/start_orcallator.sh
+ orcallator/stop_orcallator.sh
+ orcallator/Makefile"
+fi
+
AC_OUTPUT(config/PerlHead1
config/PerlHead2
+ lib/Makefile
packages/Makefile
- src/orcallator_running.pl
- src/restart_orcallator.sh
- src/start_orcallator.sh
- src/stop_orcallator.sh
+ src/orca.pl
src/Makefile
- lib/orcallator.cfg
- lib/Makefile
+ $ORCALLATOR_OUTPUT
docs/Makefile
Makefile)
-if test "$borp_cv_perl_rdds" != "yes"; then
- echo ""
- echo "Running configure in packages/$RRDTOOL_DIR to create RRDtool and RRDs.pm."
- echo ""
- echo "(cd packages/$RRDTOOL_DIR; ./configure $CONFIGURE_COMMAND_LINE --cache-file=../../config.cache)"
- echo ""
- (cd packages/$RRDTOOL_DIR; ./configure $CONFIGURE_COMMAND_LINE --cache-file=../../config.cache)
-fi
+command="(cd packages/$RRDTOOL_DIR; ./configure $CONFIGURE_COMMAND_LINE --cache-file=../../config.cache)"
+echo ""
+echo "Running configure in packages/$RRDTOOL_DIR to create RRDtool and RRDs.pm."
+echo ""
+echo $command
+echo ""
+eval $command
if test -z "$WEB_LOG"; then
AC_MSG_WARN([*** Unless you use a --with-*-log option you will not gather WWW log data.])
Modified: trunk/orca/src/Makefile.in
==============================================================================
--- trunk/orca/src/Makefile.in (original)
+++ trunk/orca/src/Makefile.in Sat Jul 13 19:22:25 2002
@@ -8,11 +8,11 @@
PERL_HEAD = @PERL_HEAD@
ORCALLATOR_DIR = @ORCALLATOR_DIR@
RRD_DIR = @RRD_DIR@
-PERL_SCRIPTS = orcallator_running orca orcallator_column
-SHELL_SCRIPTS = restart_orcallator stop_orcallator start_orcallator
+PERL_SCRIPTS = orca upgrade_installation
+SHELL_SCRIPTS =
TARGETS = $(PERL_SCRIPTS) $(SHELL_SCRIPTS)
-all: $(TARGETS) migrate_to_orcallator
+all: $(TARGETS)
install: all
$(MKDIR) $(bindir)
@@ -21,14 +21,14 @@
$(INSTALL) $$file $(bindir); \
done
-migrate: migrate_to_orcallator
- ./migrate_to_orcallator $(prefix) $(exec_prefix) $(bindir) $(libdir) $(ORCALLATOR_DIR) $(RRD_DIR)
+upgrade: upgrade_installation
+ ./upgrade_installation $(prefix) $(exec_prefix) $(bindir) $(libdir) $(ORCALLATOR_DIR) $(RRD_DIR)
clean:
$(RM) $(TARGETS)
distclean: clean
- $(RM) *.sh orcallator_running.pl Makefile
+ $(RM) *.sh orca.pl Makefile
.SUFFIXES: .pl .sh
@@ -41,21 +41,8 @@
chmod 0755 $@
Makefile: Makefile.in
- (cd ..; ./config.status)
+ cd .. && CONFIG_FILES=src/Makefile ./config.status
$(MAKE)
-orcallator_running.pl: orcallator_running.pl.in
- (cd ..; ./config.status)
- $(MAKE)
-
-restart_orcallator.sh: restart_orcallator.sh.in
- (cd ..; ./config.status)
- $(MAKE)
-
-start_orcallator.sh: start_orcallator.sh.in
- (cd ..; ./config.status)
- $(MAKE)
-
-stop_orcallator.sh: stop_orcallator.sh.in
- (cd ..; ./config.status)
- $(MAKE)
+orca.pl: orca.pl.in
+ cd .. && CONFIG_FILES=src/orca.pl ./config.status
Modified: trunk/orca/src/upgrade_installation.pl
==============================================================================
--- trunk/orca/src/upgrade_installation.pl (original)
+++ trunk/orca/src/upgrade_installation.pl Sat Jul 13 19:22:25 2002
@@ -1,5 +1,8 @@
-# migrate_to_orcallator: migrate from a percollator named installation to
-# an orcallator named install.
+# upgrade_installation: Upgrade and rename any files to the latest
+# installation of Orca:
+#
+# 1) Migrate from a percollator named installation to orcallator.
+# 2) Rename all files with * in them to _times_.
#
# Copyright (C) 1999 Blair Zajac and GeoCities, Inc.
@@ -8,6 +11,14 @@
$| = 1;
+# Check if there is an argument -n, in which case the rename will be
+# shown but not done.
+my $rename = 1;
+if (@ARGV and $ARGV[0] eq '-n') {
+ $rename = 0;
+ shift;
+}
+
# Take a list of directories and rename every file in the directory using
# the following translation in the following order:
# percollator -> orcallator
@@ -21,14 +32,30 @@
sub rename {
my $old_name = $_;
my $new_name = $_;
- $new_name =~ s/percent/\200/g;
- $new_name =~ s/percollator/orcallator/g;
- $new_name =~ s/percol/orcallator/g;
- $new_name =~ s/perc/orcallator/g;
- $new_name =~ s/\200/percent/g;
+ $new_name =~ s:percent:\200:g;
+ $new_name =~ s:percollator:orcallator:g;
+ $new_name =~ s:percol:orcallator:g;
+ $new_name =~ s:perc:orcallator:g;
+ $new_name =~ s:_{2,}:_:g;
+ $new_name =~ s:\200:percent:g;
+
+ # This name change was released between 0.23 and 0.24.
+ $new_name =~ s:\*:_times_:g;
+
+ # These are the final 0.24 names.
+ $new_name =~ s:_percent([\W_]):_pct$1:g;
+ $new_name =~ s:_number([\W_]):_num$1:g;
+ $new_name =~ s:_times([\W_]):_X$1:g;
+
+ # Be careful not to rename filenames exactly named orcallator or orca.
+ $new_name =~ s:orcallator_:o_:g;
+ $new_name =~ s:orca_:o_:g;
+
if ($old_name ne $new_name) {
- print "Renaming $File::Find::dir/$old_name\n";
- rename("$File::Find::dir/$old_name", "$File::Find::dir/$new_name") or
- warn "$0: cannot rename `$File::Find::dir/$old_name': $!\n";
+ print "$File::Find::name -> $new_name\n";
+ if ($rename) {
+ rename($old_name, $new_name) or
+ warn "$0: cannot rename $old_name: $!\n";
+ }
}
}
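The translation chain above first hides the literal string "percent" behind
the \200 placeholder so the percol-to-orcallator substitutions cannot mangle
it, then applies the 0.23-to-0.24 renames. A standalone sketch of the same
chain applied to a made-up old-style filename (the sample name is purely
illustrative):

    #!/usr/bin/perl
    # Sketch of the rename translation above applied to one made-up name.
    use strict;

    my $old_name = 'percol_disk_runpercent_web*hit';
    my $new_name = $old_name;
    $new_name =~ s:percent:\200:g;            # protect the literal "percent"
    $new_name =~ s:percollator:orcallator:g;
    $new_name =~ s:percol:orcallator:g;
    $new_name =~ s:perc:orcallator:g;
    $new_name =~ s:_{2,}:_:g;
    $new_name =~ s:\200:percent:g;            # restore "percent"
    $new_name =~ s:\*:_times_:g;              # interim 0.23 -> 0.24 name
    $new_name =~ s:_percent([\W_]):_pct$1:g;  # final 0.24 names
    $new_name =~ s:_number([\W_]):_num$1:g;
    $new_name =~ s:_times([\W_]):_X$1:g;
    $new_name =~ s:orcallator_:o_:g;          # keep plain "orcallator" intact
    $new_name =~ s:orca_:o_:g;
    print "$old_name -> $new_name\n";

Run as-is, this should print something like
percol_disk_runpercent_web*hit -> o_disk_runpercent_web_X_hit.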
Modified: trunk/orca/src/orca.pl.in
==============================================================================
--- trunk/orca/src/orca.pl.in (original)
+++ trunk/orca/src/orca.pl.in Sat Jul 13 19:22:25 2002
@@ -1,23 +1,41 @@
-# Orca: display arbitrary data from files onto web pages using RRD.
+# Orca: display arbitrary data from files onto web pages using RRDtool.
#
-# Copyright (C) 1998, 1999 Blair Zajac and GeoCities, Inc.
+# Copyright (C) 1998, 1999 Blair Zajac and Yahoo!, Inc.
use strict;
-require 5.005;
+require 5.004_01;
$| = 1;
+# Set the location of the Orca modules.
+BEGIN {
+# my $prefix = "@prefix@";
+# my $exec_prefix = "@exec_prefix@";
+# my $libdir = "@libdir@";
+# unshift(@INC, $libdir);
+}
+
use Carp;
-use Digest::MD5 2.00 qw(md5);
-use Math::IntervalSearch 1.00 qw(interval_search);
-use Data::Dumper;
+
+# Load any modules that have required version numbers here, in
+# addition to loading them in the other Orca modules, so that all
+# of the requisite version numbers are kept in one place.
+use Data::Dumper 2.101;
+use Digest::MD5 2.00 qw(md5_base64);
+use Math::IntervalSearch 1.05 qw(interval_search);
+use Storable 0.603;
+use RRDs 1.000072;
+
+# Set behavior of the Data::Dumper module.
$Data::Dumper::Indent = 1;
$Data::Dumper::Purity = 1;
$Data::Dumper::Deepcopy = 1;
# This is the version of Orca.
use vars qw($VERSION);
-$VERSION = '0.23';
+$VERSION = '0.24';
+
+my $IMAGE_SUFFIX = 'png';
# This is the version number used in creating the DS names in RRDs.
# This should be updated any time a new version of Orca needs some new
@@ -48,13 +66,13 @@
# be exactly the same as the RRA definitions, but they can be. Here
# create a quarterly plot (100 days) between the monthly and yearly
# plots. Only update the quarterly plot daily. The last array here
-# holds the number of days back in time to plot in the GIF. Be
+# holds the number of days back in time to plot in the image. Be
# careful to not increase this so much that the number of data points
-# to plot are greater than the number of pixels available for the GIF,
+# to plot are greater than the number of pixels available for the image,
# otherwise there will be a 30% slowdown due to a reduction
# calculation to resample the data to the lower resolution for the
# plot. For example, with 40 days of 2 hour data, there are 480 data
-# points. For no slowdown to occur, the GIF should be atleast 481
+# points. For no slowdown to occur, the image should be at least 481
# pixels wide.
my @gif_plot_type = (@rra_plot_type[0..2], 'quarterly', $rra_plot_type[3]);
my @gif_pdp_count = (@rra_pdp_count[0..2], @rra_pdp_count[3, 3]);
@@ -65,6 +83,7 @@
my $opt_verbose = 0;
my $opt_once_only = 0;
my $opt_rrd_update_only = 0;
+my $opt_generate_gifs = 0;
# Set up a signal handler to force looking for new files.
my $force_find_files = 0;
@@ -85,7 +104,10 @@
$bottom = '' unless defined $bottom;
local *FD;
- open(FD, "> $filename") or return;
+ unless (open(FD, "> $filename.htm")) {
+ warn "$0: cannot open `$filename.htm' for writing: $!\n";
+ return;
+ }
print FD <<END;
<html>
@@ -138,8 +160,11 @@
</html>
END
+ my $filename = "$self->{_filename}";
close($self->{_handle}) or
- warn "$0: warning: cannot close `$self->{_filename}': $!\n";
+ warn "$0: warning: cannot close `$filename.htm': $!\n";
+ rename("$filename.htm", $filename) or
+ warn "$0: cannot rename `$filename.htm' to `$filename': $!\n";
}
package Orca::OpenFileHash;
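The hunk above changes the HTML writer so that each page is written to a
temporary `$filename.htm' file and only rename()d over the final name when it
is closed, so a browser never sees a half-written page. A minimal sketch of
that write-then-rename pattern (the page name and content are illustrative):

    # Sketch of the write-then-rename pattern used above; names are made up.
    use strict;

    sub write_page {
        my ($filename, $content) = @_;
        my $tmp = "$filename.htm";
        unless (open(FD, "> $tmp")) {
            warn "$0: cannot open `$tmp' for writing: $!\n";
            return;
        }
        print FD $content;
        close(FD) or warn "$0: warning: cannot close `$tmp': $!\n";
        # rename() is atomic on the same filesystem, so readers see either
        # the old page or the complete new one.
        rename($tmp, $filename) or
            warn "$0: cannot rename `$tmp' to `$filename': $!\n";
    }

    write_page('index.html', "<html><body>An example page.</body></html>\n");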
@@ -171,6 +196,17 @@
local *FD;
+ # Uncompress compressed files on the fly and read them in.
+ if ($filename =~ /\.gz$/) {
+ $filename = "gunzip -c $filename |";
+ }
+ elsif ($filename =~ /\.Z$/) {
+ $filename = "uncompress -c $filename |";
+ }
+ elsif ($filename =~ /\.bz2$/) {
+ $filename = "bunzip2 -c $filename |";
+ }
+
unless (open(FD, $filename)) {
warn "$0: warning: cannot open `$filename' for reading: $!\n";
return;
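The lines added above let Orca read gzip-, compress-, and bzip2-compressed
source files by rewriting the filename into a piped open; the trailing `|'
makes Perl read the command's standard output instead of the file itself. A
self-contained sketch of the same trick (the log filename is illustrative and
the decompression programs must be on the PATH):

    # Sketch of the piped-open trick used above for compressed source files.
    use strict;

    my $filename = 'orcallator-2002-06-28.gz';      # illustrative name
    if    ($filename =~ /\.gz$/)  { $filename = "gunzip -c $filename |"; }
    elsif ($filename =~ /\.Z$/)   { $filename = "uncompress -c $filename |"; }
    elsif ($filename =~ /\.bz2$/) { $filename = "bunzip2 -c $filename |"; }

    open(FD, $filename) or die "$0: cannot open `$filename': $!\n";
    while (defined(my $line = <FD>)) {
        print $line;                                # process each line here
    }
    close(FD);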
@@ -211,7 +247,16 @@
return $self unless defined $self->{_hash}{$filename};
my $close_value = close($self->{_hash}{$filename}{fd});
- $close_value or warn "$0: warning: cannot close `$filename': $!\n";
+ # Only print a warning on the close if the close failed and the file
+ # descriptor is not a pipe.
+ unless ($close_value) {
+ if ($filename =~ /\|$/) {
+ warn "$0: warning: cannot close pipe `$filename': [$close_value \$?=$?] $!\n" if $opt_verbose > 1;
+ }
+ else {
+ warn "$0: warning: cannot close `$filename': [$close_value] $!\n";
+ }
+ }
my $weight = $self->{_hash}{$filename}{weight};
delete $self->{_hash}{$filename};
@@ -401,14 +446,14 @@
$result;
}
-package Orca::GIFFile;
+package Orca::ImageFile;
-use RRDs 0.99029;
+use RRDs;
use Carp;
sub new {
unless (@_ == 11) {
- confess "$0: Orca::GIFFile::new passed incorrect number of arguments.\n";
+ confess "$0: Orca::ImageFile::new passed incorrect number of arguments.\n";
}
my ($class,
@@ -424,15 +469,15 @@
$my_rrds_ref) = @_;
unless (@$my_rrds_ref) {
- confess "$0: Orca::GIFFile::new passed empty \@rrds_ref reference.\n";
+ confess "$0: Orca::ImageFile::new passed empty \@rrds_ref reference.\n";
}
unless ($name) {
- confess "$0: Orca::GIFFile::new passed empty \$name.\n";
+ confess "$0: Orca::ImageFile::new passed empty \$name.\n";
}
# Remove any special characters from the unique name and do some
# replacements.
- $name = &::strip_key_name($name);
+ $name = &::escape_name($name);
# Create the paths to the html directory and subdirectories.
my $html_dir = $config_options->{html_dir};
@@ -456,7 +501,7 @@
_all_rrd_ref => $rrd_data_files_ref,
_my_rrd_list => [ &::unique(@$my_rrds_ref) ],
_plot_ref => $plot_ref,
- _expire => $config_options->{expire_gifs},
+ _expire => $config_options->{expire_images},
_gif_height => 0,
_gif_width => 0,
_graph_options => []
@@ -466,14 +511,14 @@
my $interval = int($config_files->{$files_key}{interval}+0.5);
for (my $i=0; $i<@gif_plot_type; ++$i) {
# Load the data that helps this class determine if a particular
- # GIF file, such as the daily GIF, is current or needs to be
+ # image file, such as the daily image, is current or needs to be
# created or recreated. The data saved is the Unix epoch file
# modification time. If the file does not exist or the file
# modification time is newer than the time of the last data point
# entered, then save a file modification time of -1 which will
- # definitely cause the GIF to be recreated.
+ # definitely cause the image to be recreated.
my $plot_type = $gif_plot_type[$i];
- my @stat = stat("$gif_basename-$plot_type.gif");
+ my @stat = stat("$gif_basename-$plot_type.$IMAGE_SUFFIX");
if (@stat and $stat[9] <= $plot_end_time) {
$self->{"_${plot_type}_update_time"} = $stat[9];
}
@@ -485,6 +530,13 @@
my $gif_pdp_count = int($gif_pdp_count[$i]*300.0/$interval + 0.5);
$gif_pdp_count = 1 if $gif_pdp_count < 1;
$self->{"_${plot_type}_plot_age"} = $gif_pdp_count*$interval;
+
+ # Generate the unique plot title containing the period title
+ # for this plot.
+ $self->{"_${plot_type}_legend"} =
+ &::Capatialize($plot_type) .
+ ' ' .
+ ::replace_group_name($plot_ref->{title}, $group);
}
$self->_update_graph_options;
@@ -499,8 +551,9 @@
# Create the options for RRDs::graph that do not change across any
# invocations of RRDs::graph.
my @options = (
- '-t', ::replace_group_name($plot_ref->{title}, $group),
- '-v', ::replace_group_name($plot_ref->{y_legend}, $group)
+ '-a', 'PNG',
+ '-v', ::replace_group_name($plot_ref->{y_legend}, $group),
+ '-b', $plot_ref->{base}
);
# Add the lower-limit and upper-limit flags if defined.
if (defined $plot_ref->{plot_min}) {
@@ -512,6 +565,15 @@
if (defined $plot_ref->{rigid_min_max}) {
push(@options, '-r');
}
+ if (defined $plot_ref->{logarithmic}) {
+ push(@options, '-o');
+ }
+
+ # By default create PNG files.
+ unless ($opt_generate_gifs) {
+ push(@options, '-a', 'PNG');
+ }
+
my $data_sources = @{$self->{_my_rrd_list}};
for (my $i=0; $i<$data_sources; ++$i) {
my $rrd_key = $self->{_my_rrd_list}[$i];
@@ -535,17 +597,17 @@
}
# Force a break between the plot legend and comments.
- push(@options, 'COMMENT:\s',);
+ push(@options, 'COMMENT:\s', 'COMMENT:\s', 'COMMENT:\s');
# Generate the legends containing the current, average, minimum, and
# maximum values on the plot.
for (my $i=0; $i<$data_sources; ++$i) {
my $legend = $legends[$i];
$legend .= ' ' x ($max_legend_length - length($legend));
- push(@options, "GPRINT:average$i:LAST:$legend Current\\: %f",
- "GPRINT:average$i:AVERAGE:Average\\: %f",
- "GPRINT:average$i:MIN:Min\\: %f",
- "GPRINT:average$i:MAX:Max\\: %f\\l"
+ push(@options, "GPRINT:average$i:LAST:$legend Current\\: %8.3f %S",
+ "GPRINT:average$i:AVERAGE:Average\\: %8.3f %S",
+ "GPRINT:average$i:MIN:Min\\: %8.3f %S",
+ "GPRINT:average$i:MAX:Max\\: %8.3f %S\\l"
);
}
@@ -574,10 +636,10 @@
$_[0]->{_gif_height};
}
-# For this GIF return a string that can be used to size the image
+# For this image return a string that can be used to size the image
# properly in HTML. The output from this subroutine is either an
# empty string or the size of the image.
-sub gif_img_src_size {
+sub image_src_size {
if ($_[0]->{_gif_height} and $_[0]->{_gif_width}) {
return "width=$_[0]->{_gif_width} height=$_[0]->{_gif_height}";
}
@@ -650,15 +712,16 @@
return;
}
- my $gif_filename = "$self->{_gif_basename}-$plot_type.gif";
+ my $gif_filename = "$self->{_gif_basename}-$plot_type.$IMAGE_SUFFIX";
print " Creating `$gif_filename'.\n" if $opt_verbose > 1;
- my $plot_ref = $self->{_plot_ref};
+ my $plot_ref = $self->{_plot_ref};
my ($graph_return, $gif_width, $gif_height) =
RRDs::graph
$gif_filename,
@{$self->{_graph_options}},
+ '-t', $self->{"_${plot_type}_legend"},
'-s', ($plot_end_time-$gif_days_back*$day_seconds),
'-e', $plot_end_time,
'-w', $plot_ref->{plot_width},
@@ -732,18 +795,18 @@
# Remove any special characters from the unique name and do some
# replacements.
- $name = &::strip_key_name($name);
+ $name = &::escape_name($name);
# Create the paths to the data directory.
- my $data_dir = $config_options->{data_dir};
+ my $rrd_dir = $config_options->{rrd_dir};
if ($config_files->{$files_key}{sub_dir}) {
- $data_dir .= "/$group";
- unless (-d $data_dir) {
- warn "$0: making directory `$data_dir'.\n";
- ::recursive_mkdir($data_dir);
+ $rrd_dir .= "/$group";
+ unless (-d $rrd_dir) {
+ warn "$0: making directory `$rrd_dir'.\n";
+ ::recursive_mkdir($rrd_dir);
}
}
- my $rrd_filename = "$data_dir/$name.rrd";
+ my $rrd_filename = "$rrd_dir/$name.rrd";
# Create the new object.
my $self = $class->SUPER::new($rrd_filename);
@@ -951,7 +1014,7 @@
use Carp;
use Digest::MD5 qw(md5);
-use Storable 0.603 qw(dclone);
+use Storable qw(dclone);
use vars qw(@ISA);
@ISA = qw(Orca::DataFile);
@@ -1227,19 +1290,29 @@
next;
}
- # There are three cases to handle. The first is a single data
- # source with a single element that has a regular expression. In
+ # There are three cases to handle:
+ # 1) Regular expression match in the first data with additional datas.
+ # 2) Regular expression match in the first data with no additional datas.
+ # 3) All others.
+ # The first is a single data source that has a regular expression. In
# this case, all of the columns are searched to match the regular
- # expression. The second case is two or more data sources and
- # with one element in the first data source that has a regular
- # expression match. This may generate more than one plot, while
- # the first one will only generate one plot. The final case to
- # handle is when the previous two cases are not true. The last
- # column matched on is stored in @regexp_pos.
- my $number_datas = @{$plot->{data}};
- my $number_elements = @{$plot->{data}[0]};
- my $has_regexp = $plot->{data}[0][0] =~ m:\(.+\):;
- if ($number_datas == 1 and $number_elements == 1 and $has_regexp) {
+ # expression. This generates a single plot with all of the different
+ # data sources plotted on it. The second case is two or more data
+ # sources and where the first data source has a regular expression
+ # match. This may generate more than one plot, one for each set of
+ # columns that matches the regular expression. The final case to handle is when
+ # the previous two cases are not true. The last column matched on is
+ # stored in @regexp_pos.
+ my $number_datas = @{$plot->{data}};
+ my $number_elements = @{$plot->{data}[0]};
+ my $regexp_element_index = -1;
+ for (my $j=0; $j<@{$plot->{data}[0]}; ++$j) {
+ if ($plot->{data}[0][$j] =~ m:\(.+\):) {
+ $regexp_element_index = $j;
+ last;
+ }
+ }
+ if ($number_datas == 1 and $regexp_element_index != -1) {
# If we've gone up to the last column to match, then go on.
if ($regexp_pos[$i] >= @column_description) {
@@ -1252,36 +1325,37 @@
}
$regexp_pos[$i] = @column_description;
- # In this case we're creating a whole new plot that will have as
- # many data sources as their are columns that match the regular
- # expression. Start by making a deep copy of the plot. Be
- # careful not to make a deep copy of the creates reference,
- # since it can cause recursion.
- my $creates = delete $plot->{creates};
- {
- my $new_plot = dclone($plot);
- $plot->{creates} = $creates;
- $new_plot->{creates} = $creates;
- $plot = $new_plot;
- }
+ # Start by making a deep copy of the plot. Be careful not to make
+ # a deep copy of the `creates' reference, since it can cause
+ # recursion. Replace the regular expression in the first data
+ # with the name of the column that caused the match.
+ my $creates = delete $plot->{creates};
+ my $new_plot = dclone($plot);
+ $plot->{creates} = $creates;
+ $new_plot->{creates} = $creates;
+ $plot = $new_plot;
# At this point we have a copy of plot. Now go through looking
# for all the columns that match and create an additional data
# source for each match.
- my $regexp = $plot->{data}[0][0];
- my $new_data_index = 0;
- my $original_legend = $plot->{legend}[0];
+ my @data_with_regexp = @{$plot->{data}[0]};
+ my $regexp = $data_with_regexp[$regexp_element_index];
+ my $new_data_index = 0;
+ my $original_legend = $plot->{legend}[0];
foreach my $column_name (@column_description) {
my @matches = $column_name =~ /$regexp/;
next unless @matches;
- $plot->{data}[$new_data_index] = [ $column_name ];
+ # Replace the regular expression match with the matched column
+ # name.
+ $data_with_regexp[$regexp_element_index] = $column_name;
+ $plot->{data}[$new_data_index] = [ @data_with_regexp ];
# Copy any items over that haven't been created for this new
# data source. Make sure that any new elements added to
# pcl_plot_append_elements show up here.
unless (defined $plot->{color}[$new_data_index]) {
- $plot->{color}[$new_data_index] = $::cc_default_colors[$new_data_index];
+ $plot->{color}[$new_data_index] = &::get_color($new_data_index);
}
unless (defined $plot->{legend}[$new_data_index]) {
$plot->{legend}[$new_data_index] = $original_legend;
@@ -1292,7 +1366,7 @@
# Replace the regular expression in any legend elements.
my $legend = $plot->{legend}[$new_data_index];
- my $count = 1;
+ my $count = 1;
foreach my $match (@matches) {
$legend =~ s/\$$count/$match/ge;
$legend =~ s/\(.+\)/$match/ge;
@@ -1311,7 +1385,7 @@
$i = $plot->{flush_regexps} ? $oldest_regexp_index : $i + 1;
next unless $new_data_index;
}
- elsif ($number_datas > 1 and $number_elements == 1 and $has_regexp) {
+ elsif ($number_datas > 1 and $regexp_element_index != -1) {
$handle_regexps = 1;
# If we've gone up to the last column to match, then go on. If
@@ -1326,11 +1400,16 @@
}
# Go through all of the columns and stop at the first match.
- my $regexp = $plot->{data}[0][0];
+ my @data_with_regexp = @{$plot->{data}[0]};
+ my $regexp = $data_with_regexp[$regexp_element_index];
+ my $column_description;
my @matches;
for (;$regexp_pos[$i]<@column_description; ++$regexp_pos[$i]) {
@matches = $column_description[$regexp_pos[$i]] =~ /$regexp/;
- last if @matches;
+ if (@matches) {
+ $column_description = $column_description[$regexp_pos[$i]];
+ last;
+ }
}
unless (@matches) {
if ($oldest_regexp_index == $i) {
@@ -1342,16 +1421,20 @@
}
++$regexp_pos[$i];
- # Make a deep copy of the plot. In the string form of the plot
+ # Start by making a deep copy of the plot. Be careful not to make
+ # a deep copy of the `creates' reference, since it can cause
+ # recursion. Replace the regular expression in the first data
+ # with the name of the column that caused the match. Then create
+ # a string form of the plot object using Data::Dumper::Dumper and
# replace all of the $1, $2, ... with what was matched in the
- # first data source. The tricky one is to replace the regular
- # expression that did the match in the first place. Also, save
- # a copy of the creates array for this plot so it doesn't also
- # get dumped.
- my $creates = delete $plot->{creates};
- my $d = Data::Dumper->Dump([$plot], [qw(plot)]);
- $plot->{creates} = $creates;
- $d =~ s/$regexp/$matches[0]/mge;
+ # first data source.
+ my $creates = delete $plot->{creates};
+ my $new_plot = dclone($plot);
+ $plot->{creates} = $creates;
+ $plot = $new_plot;
+ $plot->{data}[0][$regexp_element_index] = $column_description;
+ my $d = Data::Dumper->Dump([$plot], [qw(plot)]);
+ $plot->{creates} = $creates;
my $count = 1;
foreach my $match (@matches) {
$d =~ s/\$$count/$match/mge;
@@ -1359,10 +1442,11 @@
++$count;
}
{
+ local $SIG{__DIE__} = 'DEFAULT';
local $SIG{__WARN__} = sub { die $_[0] };
eval $d;
}
- die "$0: internal error: eval on\n $d\nOutput: $@\n" if $@;
+ die "$0: internal error: eval on\n\n$d\nOutput: $@\n" if $@;
# Either increment the index or reset it to the oldest regexp
# index.
@@ -1370,6 +1454,7 @@
$i = $plot->{flush_regexps} ? $oldest_regexp_index : $i + 1;
}
else {
+
$old_i = $i++;
++$oldest_regexp_index unless $handle_regexps;
}
@@ -1427,20 +1512,25 @@
# data. Also create an unique Orca data file name for this plot
# and a name for this plot that does not include the group.
my @my_rrds;
+ my @my_short_rrds;
my @no_group_name;
my @group_name;
+ my $previous_group = '';
+ my $previous_subgroup = '';
for (my $j=0; $j<@datas; ++$j) {
- my $expr = "@{$datas[$j]}";
+ my $expr = undef;
my $sub_expr_sub = undef;
my $data_name = join('_', @{$plot->{data}[$j]});
if (defined $datas[$j]) {
+ $expr = "@{$datas[$j]}";
my $sub_expr = "sub {\n return $expr;\n}\n";
my $sub_expr_md5 = md5($sub_expr);
unless (defined ($sub_expr_sub = $choose_data_sub_cache{$sub_expr_md5})) {
{
+ local $SIG{__DIE__} = 'DEFAULT';
local $SIG{__WARN__} = sub { die $_[0] };
$sub_expr_sub = eval $sub_expr;
}
@@ -1459,9 +1549,27 @@
push(@no_group_name, "${files_key}_${data_name}");
push(@group_name, $name);
- # Create a new RRD only if it doesn't already exist and if a
- # valid get data subroutine is created. Keep the
- # choose_data_sub for this file.
+ # Create a short name that may exclude the group and subgroup if the
+ # previous data had the same group and subgroup.
+ my $short_name_with_subgroup;
+ if ($files_key eq $previous_group) {
+ if ($group eq $previous_subgroup) {
+ $short_name_with_subgroup = "__$data_name";
+ }
+ else {
+ $short_name_with_subgroup = "_${group}_${data_name}";
+ $previous_subgroup = $group;
+ }
+ }
+ else {
+ $previous_group = $files_key;
+ $previous_subgroup = $group;
+ $short_name_with_subgroup = $name;
+ }
+
+ # Create a new RRD only if it doesn't already exist and if a valid
+ # get data subroutine is created. Keep the choose_data_sub for this
+ # file.
if (defined $sub_expr_sub) {
$choose_data_expr .= " '$name', $expr,\n";
unless (defined $rrd_data_files_ref->{$name}) {
@@ -1476,6 +1584,7 @@
$self->{_all_rrd_ref} = $rrd_data_files_ref;
$my_rrd_list{$name} = 1;
push(@my_rrds, $name);
+ push(@my_short_rrds, $short_name_with_subgroup);
}
}
@@ -1486,16 +1595,16 @@
$gif->add_rrds(@my_rrds);
}
else {
- $gif = Orca::GIFFile->new($config_options,
- $config_files,
- $config_plots,
- $files_key,
- $group,
- join(',', @my_rrds),
- join(',', @no_group_name),
- $plot,
- $rrd_data_files_ref,
- \@my_rrds);
+ $gif = Orca::ImageFile->new($config_options,
+ $config_files,
+ $config_plots,
+ $files_key,
+ $group,
+ join(',', @my_short_rrds),
+ join(',', @no_group_name),
+ $plot,
+ $rrd_data_files_ref,
+ \@my_rrds);
$gif_files_ref->{hash}{$group_name} = $gif;
push(@{$gif_files_ref->{list}}, $gif);
push(@{$config_plots->[$old_i]{creates}}, $gif);
@@ -1509,6 +1618,7 @@
$choose_data_expr .= " );\n}\n";
{
+ local $SIG{__DIE__} = 'DEFAULT';
local $SIG{__WARN__} = sub { die $_[0] };
$self->{_choose_data_sub} = eval $choose_data_expr;
}
@@ -1604,7 +1714,7 @@
my $number_added = 0;
my $close_once_done = 0;
my $number_columns = @{$self->{_column_description}};
- while (my $line = <$fd>) {
+ while (defined(my $line = <$fd>)) {
# Skip the line if the word timestamp appears in it. This is a
# temporary fix for orcallator.se to place a new information line
# in the output file when it starts up.
@@ -1686,12 +1796,16 @@
package main;
sub Usage {
- die "usage: $0 [-o] [-r] [-v] config_file\n";
+ die "usage: $0 [-gifs] [-o] [-r] [-v] config_file\n";
}
while (@ARGV and $ARGV[0] =~ /^-\w/) {
my $arg = shift;
- if ($arg eq '-o') {
+ if ($arg eq '-gifs') {
+ ++$opt_generate_gifs;
+ $IMAGE_SUFFIX = 'gif';
+ }
+ elsif ($arg eq '-o') {
++$opt_once_only;
}
elsif ($arg eq '-v') {
@@ -1707,6 +1821,11 @@
Usage unless @ARGV;
+# Install signal handlers to clean up.
+$SIG{INT} = \&catch_signal;
+$SIG{TERM} = \&catch_signal;
+$SIG{__DIE__} = \&catch_signal;
+
if ($opt_verbose) {
print "Orca version $VERSION using RRDs version $RRDs::VERSION.\n";
}
@@ -1715,9 +1834,45 @@
exit 0;
+# This is the name of the locking directory.
+my $locking_directory;
+
+# This is set to 1 if the locking directory should be removed.
+my $rmdir_locking_directory;
+
+sub clean_up_and_quit {
+ if ($rmdir_locking_directory and
+ $locking_directory and
+ -d $locking_directory) {
+ rmdir($locking_directory) or
+ warn "$0: cannot rmdir `$locking_directory': $!\n";
+ }
+ exit 0;
+}
+
+sub catch_signal {
+ my $signal = shift;
+ chomp($signal);
+ $signal =~ s/\.+$//;
+ if ($signal =~ /$0/o) {
+ print STDERR "$signal.\n";
+ }
+ else {
+ print STDERR "$0: caught signal $signal.\n";
+ }
+ clean_up_and_quit;
+}
+
sub main {
my $config_filename = shift;
+ # Create a locking directory using the configuration filename.
+ $locking_directory = "$config_filename.lock";
+ unless (mkdir($locking_directory, 0755)) {
+ die "$0: cannot create locking directory `$locking_directory': $!\n";
+ }
+ $rmdir_locking_directory = 1;
+
my $start_time = time;
# Load the configuration file.
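The locking added above relies on mkdir() being atomic: the first Orca process
started on a given configuration file creates `config_file.lock' and any
second copy dies immediately, while the INT, TERM, and __DIE__ handlers make
sure the directory is removed again on the way out. A stripped-down sketch of
the same idea (the configuration filename is illustrative):

    # Sketch of the mkdir()-based lock and cleanup used above.
    use strict;

    my $config_filename   = 'orcallator.cfg';       # illustrative name
    my $locking_directory = "$config_filename.lock";

    # mkdir either creates the lock or fails if another process holds it.
    mkdir($locking_directory, 0755) or
        die "$0: cannot create locking directory `$locking_directory': $!\n";

    for my $sig (qw(INT TERM)) {
        $SIG{$sig} = sub {
            rmdir($locking_directory);
            exit 0;
        };
    }

    # ... do the real work here ...

    rmdir($locking_directory) or
        warn "$0: cannot rmdir `$locking_directory': $!\n";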
@@ -1744,6 +1899,8 @@
if ($opt_verbose) {
printf "Running time is %d:%02d minutes.\n", $minutes, $seconds;
}
+
+ &clean_up_and_quit;
}
# Given a directory name, attempt to make all necessary directories.
@@ -1930,6 +2087,8 @@
}
}
+# system("/bin/ps -p $$ -o\"rss vsz pmem time user pid comm\"");
+
# Save the current state of the source data files.
if ($found_new_files or $updated_source_files) {
&save_state($config_options->{state_file}, $new_found_files_ref);
@@ -1938,10 +2097,12 @@
# Create the HTML and GIF files now.
unless ($opt_rrd_update_only) {
# Plot the data in each gif.
- print "Updating GIFs.\n" if $opt_verbose;;
+ print "Updating " . uc($IMAGE_SUFFIX). "s.\n" if $opt_verbose;;
+# system("/bin/ps -p $$ -o\"rss vsz pmem time user pid comm\"");
foreach my $gif (@{$gif_files_ref->{list}}) {
$gif->plot;
}
+# system("/bin/ps -p $$ -o\"rss vsz pmem time user pid comm\"");
# Make the HTML files.
if ($found_new_files) {
@@ -2050,12 +2211,12 @@
$index_html->print("<hr>\n<font size=\"-2\">");
# The first step is to create the HTML files for each different
- # group. This is only done if there is more than one group gathered
- # from the configuration and input data files. If there is more
- # than one group first list the different available groups and
- # create for each group an HTML file that contains HREFs to the GIFs
- # for that group. Also create an HTML file for the daily, weekly,
- # monthly, and yearly GIFs.
+ # subgroup. This is only done if there is more than one subgroup
+ # gathered from the configuration and input data files. If there
+ # is more than one subgroup first list the different available subgroups
+ # and create for each subgroup an HTML file that contains HREFs to the
+ # images for that subgroup. Also create an HTML file for different time
+ # span images (i.e., daily, monthly, etc).
# This variable sets the number of groups to place into a single
# row.
@@ -2072,7 +2233,7 @@
my $html_group = ($number_groups == 1 and !$group) ? 'Everything' : $group;
# Create the HTML code for the main index.html file.
- my $group_basename = strip_key_name($html_group);
+ my $group_basename = escape_name($html_group);
my $element = "<table border=2><tr><td><b>$html_group</b></td></tr>\n<tr><td>\n";
foreach my $plot_type (@gif_plot_type) {
$element .= "<a href=\"$group_basename-$plot_type.html\">";
@@ -2088,8 +2249,7 @@
@table_columns = ();
}
- # Create the daily, weekly, monthly, yearly, and all HTML files
- # for this group.
+ # Create the various time span HTML files for this subgroup.
my @html_files;
foreach my $plot_type (@gif_plot_type, 'all') {
my $href = "$group_basename-$plot_type.html";
@@ -2109,9 +2269,8 @@
Plot_Type => $Plot_Type});
}
- # At the top of the daily, weekly, monthly, yearly, and all HTML
- # files add HREFs to the other date span HTML files in the same
- # group.
+ # At the top of the various time span HTML files add HREFs to the
+ # other date span HTML files in the same subgroup.
my $href_html;
foreach my $plot_type (@html_files) {
$href_html .= "<a href=\"$plot_type->{href}\">" .
@@ -2121,9 +2280,11 @@
$html_file->{fd}->print($href_html);
}
- # Use only those GIFs now that have the same group name as the
- # HTML files that are being created.
- my @gifs = grep {$group eq $_->group} @{$gif_files_ref->{list}};
+ # Use only those images now that have the same subgroup name as the
+ # HTML files that are being created. Make sure the images appear
+ # in the files in the order listed in the configuration file.
+ my @gifs = sort {$a->{_plot_ref}{_index} <=> $b->{_plot_ref}{_index}}
+ grep {$group eq $_->group} @{$gif_files_ref->{list}};
if (@gifs > 1) {
my $href_html = "<hr>";
for (my $i=0; $i<@gifs; ++$i) {
@@ -2141,9 +2302,9 @@
my $gif = $gifs[$i];
my $name = $gif->name;
my $title = replace_group_name($gif->plot_ref->{title}, $gif->group);
- my $href = "href=\"" . strip_key_name($name) . ".html\"";
+ my $href = "href=\"" . escape_name($name) . ".html\"";
my $sub_dir = $config_files->{$gif->files_key}{sub_dir};
- my $gif_size = $gif->gif_img_src_size;
+ my $gif_size = $gif->image_src_size;
foreach my $html_file (@html_files) {
$html_file->{fd}->print("<hr>\n<h2><a ${href} name=\"$i\">$html_file->{Plot_Type} " .
@@ -2153,7 +2314,7 @@
# Put the proper GIFs into each HTML file. The all HTML file is
# listed last and requires special handling.
for (my $j=0; $j<@html_files-1; ++$j) {
- my $gif_filename = "$name-$html_files[$j]{plot_type}.gif";
+ my $gif_filename = "$name-$html_files[$j]{plot_type}.$IMAGE_SUFFIX";
$gif_filename = "$group/$gif_filename" if $sub_dir;
my $html = "<a $href><img src=\"$gif_filename\" $gif_size " .
"alt=\"$html_files[$j]{Plot_Type} $title\"></a>\n";
@@ -2189,101 +2350,98 @@
# This sets the number of plot types to place into a single row in
# the main index.html.
$table_number_columns = 1;
- @table_columns = ();
+ @table_columns = ();
# Go through all of the configured plots.
for (my $i=0; $i<@$config_plots; ++$i) {
next unless @{$config_plots->[$i]{creates}};
- # Create an ordered list of GIFs sorted on the legend name for
- # each GIF. Remember, each GIF represented here actually
- # represents the set of daily, weekly, monthly, and yearly GIF
- # files. %gif_legend_no_group is a hash keyed by the GIF that
- # contains the legend with no group substitution for the GIF. The
- # %legends hash is keyed by the legend name with no group
- # substitution and contains a reference to an array of GIFs that
+ # Create an ordered list of images sorted on the legend name for
+ # each image. Remember, each image represented here actually
+ # represents the set of time span image files.
+ # %image_legend_no_subgroup is a hash keyed by the image that
+ # contains the legend with no subgroup substitution for the image.
+ # The %legends hash is keyed by the legend name with no subgroup
+ # substitution and contains a reference to an array of images that
# have the same legend name.
- my %gif_legend_no_group;
+ my %gif_legend_no_subgroup;
my %same_legends_gif_list;
foreach my $gif (@{$config_plots->[$i]{creates}}) {
- my $legend_no_group = replace_group_name($gif->plot_ref->{title}, '');
- $gif_legend_no_group{$gif} = $legend_no_group;
+ my $legend_no_subgroup = replace_group_name($gif->plot_ref->{title}, '');
+ $gif_legend_no_subgroup{$gif} = $legend_no_subgroup;
- unless (defined $same_legends_gif_list{$legend_no_group}) {
- $same_legends_gif_list{$legend_no_group} = [];
+ unless (defined $same_legends_gif_list{$legend_no_subgroup}) {
+ $same_legends_gif_list{$legend_no_subgroup} = [];
}
- push(@{$same_legends_gif_list{$legend_no_group}}, $gif);
+ push(@{$same_legends_gif_list{$legend_no_subgroup}}, $gif);
}
# Put together the correctly ordered list of GIFs using the array
# references in the legends hash. Sort the GIFs using the special
# sorting routine for group names.
my @gifs;
- foreach my $legend_no_group (sort keys %same_legends_gif_list) {
- @{$same_legends_gif_list{$legend_no_group}} =
- sort sort_group_names @{$same_legends_gif_list{$legend_no_group}};
- push(@gifs, @{$same_legends_gif_list{$legend_no_group}});
+ foreach my $legend_no_subgroup (sort keys %same_legends_gif_list) {
+ @{$same_legends_gif_list{$legend_no_subgroup}} =
+ sort sort_group_names @{$same_legends_gif_list{$legend_no_subgroup}};
+ push(@gifs, @{$same_legends_gif_list{$legend_no_subgroup}});
}
# This hash keyed by legend name holds an array of references to a
# hash of file descriptor, HREF and plot type.
my %legend_html_files;
- # Now for each set of daily, weekly, monthly and yearly GIFs, go
+ # Now for each set of time span (i.e., daily, weekly, etc) images, go
# through and create the correct HTML files.
foreach my $gif (@gifs) {
- my $no_group_name = strip_key_name($gif->no_group_name);
- my $legend_no_group = $gif_legend_no_group{$gif};
+ my $no_group_name = escape_name($gif->no_group_name);
+ my $legend_no_subgroup = $gif_legend_no_subgroup{$gif};
# If this is the first time that this legend has been seen in
# for creating the proper HTML files, then create the new HTML
# files and set up the top of them properly and place into the
# main index.html the proper HREFs to these files.
- unless (defined $legend_html_files{$legend_no_group}) {
-
- # Now create the HTML files for the daily, weekly, monthly,
- # yearly, and all plots. Use the legend name to create this
- # list.
- $legend_html_files{$legend_no_group} = [];
+ unless (defined $legend_html_files{$legend_no_subgroup}) {
+ # Now create the HTML files for the time span plots. Use the
+ # legend name to create this list.
+ $legend_html_files{$legend_no_subgroup} = [];
foreach my $plot_type (@gif_plot_type, 'all') {
my $href = "$no_group_name-$plot_type.html";
my $filename = "$html_dir/$href";
my $Plot_Type = Capatialize($plot_type);
my $fd = Orca::HTMLFile->new($filename,
- "$Plot_Type $legend_no_group",
+ "$Plot_Type $legend_no_subgroup",
$config_options->{html_page_header},
"<hr>\n$config_options->{html_page_footer}");
unless ($fd) {
warn "$0: warning: cannot open `$filename' for writing: $!\n";
next;
}
- push(@{$legend_html_files{$legend_no_group}},
+ push(@{$legend_html_files{$legend_no_subgroup}},
{fd => $fd,
href => $href,
plot_type => $plot_type,
Plot_Type => $Plot_Type});
}
- # For each of the daily, weekly, monthy, yearly and all HTML
- # files add at the top of the file HREFs to all of the daily,
- # weekly, monthly, yearly and all HTML files. Also add HREFs
- # to the different groups later on in the same HTML file.
- my @legend_html_files = @{$legend_html_files{$legend_no_group}};
+ # For each of the time span HTML files add at the top of the
+ # file HREFs to all of the time span HTML files. Also add HREFs
+ # to the different subgroups later on in the same HTML file.
+ my @legend_html_files = @{$legend_html_files{$legend_no_subgroup}};
my $href_html;
foreach my $plot_type (@legend_html_files) {
$href_html .= "<a href=\"$plot_type->{href}\">" .
- "$plot_type->{Plot_Type} $legend_no_group</a><br>\n";
+ "$plot_type->{Plot_Type} $legend_no_subgroup</a><br>\n";
}
# Add to the top of the file HREFs to all of the different
# groups in the HTML file. This makes traversing the HTML
# page easier. Do this if there are two or more groups in
# this HTML page.
- if (@{$same_legends_gif_list{$legend_no_group}} > 1) {
+ if (@{$same_legends_gif_list{$legend_no_subgroup}} > 1) {
$href_html .= "<hr>\n";
- foreach my $legend_gif (@{$same_legends_gif_list{$legend_no_group}}) {
+ foreach my $legend_gif (@{$same_legends_gif_list{$legend_no_subgroup}}) {
my $group = $legend_gif->group;
$href_html .= "<a href=\"#$group\">[$group]</a><spacer size=10>\n";
}
@@ -2292,9 +2450,14 @@
$html_file->{fd}->print($href_html);
}
- # Create the HTML code that goes into the main index.html that
- # links to these other HTML files.
- my $element = "<td><b>$legend_no_group</b></td>\n";
+ # Create the HTML code that goes into the main index.html that links
+ # to these other HTML files. If the configuration file contains
+ # an href for information on this plot, then include the href here.
+ my $element = "<td><b>$legend_no_subgroup";
+ if (my $legend_href = $config_plots->[$i]{href}) {
+ $element .= " [<a href=\"$legend_href\">Info</a>]";
+ }
+ $element .= "</b></td>\n";
foreach my $plot_type (@gif_plot_type, 'all') {
$element .= "<td><a href=\"$no_group_name-$plot_type.html\">";
$element .= Capatialize($plot_type) . "</a></td>\n";
@@ -2306,12 +2469,10 @@
}
}
- # At this point the HTML files for this set of daily, weekly,
- # monthly, and yearly GIFs have been opened. Now create the
- # summary HTML file that contains only four GIF images, the
- # daily, weekly, monthly, and yearly GIFs for a particular plot
- # for a particular group.
- my $with_group_name = strip_key_name($gif->name);
+ # At this point the HTML files for this set of images have been
+ # opened. Now create the summary HTML file that contains only the
+ # images for a particular plot for a particular subgroup.
+ my $with_group_name = escape_name($gif->name);
my $legend_with_group = replace_group_name($gif->plot_ref->{title},
$gif->group);
my $summarize_name = "$html_dir/$with_group_name.html";
@@ -2326,11 +2487,11 @@
my $sub_dir = $config_files->{$gif->files_key}{sub_dir};
my $gif_filename = $with_group_name;
$gif_filename = $gif->group . "/$gif_filename" if $sub_dir;
- my $gif_size = $gif->gif_img_src_size;
+ my $gif_size = $gif->image_src_size;
foreach my $plot_type (@gif_plot_type) {
my $Plot_Type = Capatialize($plot_type);
$summarize_html->print("<hr>\n<h2>$Plot_Type $legend_with_group</h2>\n",
- "<img src=\"$gif_filename-$plot_type.gif\"",
+ "<img src=\"$gif_filename-$plot_type.$IMAGE_SUFFIX\"",
$gif_size,
"alt=\"$Plot_Type $legend_with_group\">\n");
}
@@ -2341,15 +2502,15 @@
my $href = "href=\"$with_group_name.html\"";
- my @legend_html_files = @{$legend_html_files{$legend_no_group}};
- $legend_html_files[-1]{fd}->print("<hr>\n<h2><a ${href} name=\"$group\">$group $legend_no_group</a></h2>\n");
+ my @legend_html_files = @{$legend_html_files{$legend_no_subgroup}};
+ $legend_html_files[-1]{fd}->print("<hr>\n<h2><a ${href} name=\"$group\">$group $legend_no_subgroup</a></h2>\n");
for (my $i=0; $i<@legend_html_files-1; ++$i) {
my $Plot_Type = $legend_html_files[$i]{Plot_Type};
- my $gif_filename = "$name-$legend_html_files[$i]{plot_type}.gif";
+ my $gif_filename = "$name-$legend_html_files[$i]{plot_type}.$IMAGE_SUFFIX";
$gif_filename = "$group/$gif_filename" if $sub_dir;
my $html = "<a $href><img src=\"$gif_filename\" $gif_size " .
- "alt=\"$Plot_Type $group $legend_no_group\"></a>\n";
- $legend_html_files[$i]{fd}->print("<hr>\n<h2><a ${href} name=\"$group\">$Plot_Type $group $legend_no_group</a></h2>\n");
+ "alt=\"$Plot_Type $group $legend_no_subgroup\"></a>\n";
+ $legend_html_files[$i]{fd}->print("<hr>\n<h2><a ${href} name=\"$group\">$Plot_Type $group $legend_no_subgroup</a></h2>\n");
$legend_html_files[$i]{fd}->print($html);
$legend_html_files[-1]{fd}->print($html);
}
@@ -2451,19 +2612,48 @@
$title;
}
-# Strip special characters from key names.
-sub strip_key_name {
+# Replace special characters in key names, remove redundant characters,
+# and shorten the names so the maximum path name length is not exceeded.
+# If the name is still so long that appending -daily.html or other
+# suffixes could exceed the maximum filename path length, chosen to be
+# 235 characters, then compute an MD5 hash of the name, trim the name to
+# 210 characters, which leaves enough space for a 22 byte base64 MD5
+# digest plus a separating '-' plus the appended suffix, and append the
+# MD5 digest.
+sub escape_name {
my $name = shift;
+
$name =~ s/:/_/g;
$name =~ s:/:_per_:g;
$name =~ s:\s+:_:g;
- $name =~ s:%:_percent_:g;
- $name =~ s:#:_number_:g;
- $name =~ s:_{2,}:_:g;
+ $name =~ s:%:_pct_:g;
+ $name =~ s:#:_num_:g;
+ $name =~ s:\*:_X_:g;
+
+ # Trim anything containing orcallator, orca.
+ $name =~ s:orcallator:o:g;
+ $name =~ s:orca:o:g;
# Remove trailing _'s.
$name =~ s:_+$::;
$name =~ s:_+,:,:g;
+
+ # Replace multiple _'s with one _, except when they follow a , which
+ # happens when the same group and subgroup appear for a new data
+ # source.
+ $name =~ s:,_{2,}:\200:g;
+ $name =~ s:_{2,}:_:g;
+ $name =~ s:\200:,__:g;
+
+ if (length($name) > 235) {
+ my $md5 = md5_base64($name);
+ $name = substr($name, 0, 210) . "-$md5";
+
+ # Be careful to convert any / characters to _, since / is a valid base64
+ # character and should not be used.
+ $name =~ s:/:_:g;
+ }
+
$name;
}
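When a generated name is still longer than 235 characters after the
substitutions above, escape_name() trims it to 210 characters and appends a
22-character base64 MD5 digest, which keeps the result plus a later
-daily.html style suffix under the typical 255-character filename limit. A
small sketch of just that length-capping step, on an artificial name:

    # Sketch of the length-capping step of escape_name(); the long name is
    # artificial.
    use strict;
    use Digest::MD5 2.00 qw(md5_base64);

    my $name = 'o_web_server_' . ('x' x 300);        # made-up long name
    if (length($name) > 235) {
        my $md5 = md5_base64($name);                 # 22 character digest
        $name = substr($name, 0, 210) . "-$md5";
        # / is valid base64 but not wanted in a filename, so map it to _.
        $name =~ s:/:_:g;
    }
    print length($name), ": $name\n";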
@@ -2559,7 +2749,7 @@
$new_found_files_ref->{$filename} = $old_found_files_ref->{$filename};
}
else {
- print " $filename\n" if $opt_verbose;
+# print " $filename\n" if $opt_verbose;
my $data_file =
Orca::SourceDataFile->new($filename,
$config_files->{$files_key}{interval},
@@ -2695,51 +2885,59 @@
my @cc_optional_options;
my @cc_optional_files;
my @cc_optional_plots;
+my @cc_default_colors;
+
+sub get_color {
+ $cc_default_colors[$_[0] % @cc_default_colors];
+}
sub check_config {
my ($config_filename, $config_options, $config_files, $config_plots) = @_;
unless (@cc_required_options) {
- @cc_required_options = qw(state_file
- data_dir
- html_dir);
+ @cc_required_options = qw(html_dir
+ rrd_dir
+ state_file);
@cc_required_files = qw(column_description
date_source
find_files
interval);
@cc_required_plots = qw(data
source);
- @cc_optional_options = qw(expire_gifs
+ @cc_optional_options = qw(expire_images
html_page_footer
html_page_header
html_top_title
late_interval
sub_dir
warn_email);
- @cc_optional_files = qw(date_format
- reopen);
+ @cc_optional_files = qw(reopen);
@cc_optional_plots = qw(flush_regexps
+ href
plot_width
- plot_height
- rigid_min_max);
+ plot_height);
# This is a special variable that gets used in add_plots.
- @::cc_default_colors = ('00ff00', # Green
+ @cc_default_colors = ('00ff00', # Green
'0000ff', # Blue
'ff0000', # Red
'a020f0', # Magenta
'ffa500', # Orange
'a52a2a', # Brown
- '00ffff'); # Cyan
+ '00ffff', # Cyan
+ '00aa00', # Dark Green
+ 'eeee00', # Yellow
+ '5e5e5e', # Dark Gray
+ '0000aa'); # Dark Blue
}
- # If data_dir is not set, then use base_dir. Only die if both are
+ # If rrd_dir is not set, then use base_dir. Only die if both are
# not set.
- unless (defined $config_options->{data_dir}) {
+ unless (defined $config_options->{rrd_dir}) {
if (defined $config_options->{base_dir}) {
- $config_options->{data_dir} = $config_options->{base_dir};
+ $config_options->{rrd_dir} = $config_options->{base_dir};
}
else {
- die "$0: error: must define `data_dir' in `$config_filename'.\n";
+ die "$0: error: must define `rrd_dir' in `$config_filename'.\n";
}
}
@@ -2750,8 +2948,8 @@
}
}
- # Check if the data_dir and html_dir directories exist.
- foreach my $dir_key ('html_dir', 'data_dir') {
+ # Check if the html_dir and rrd_dir directories exist.
+ foreach my $dir_key ('html_dir', 'rrd_dir') {
my $dir = $config_options->{$dir_key};
die "$0: error: please create $dir_key `$dir'.\n" unless -d $dir;
}
@@ -2772,11 +2970,13 @@
$expr =~ s/interval/\$_[0]/g;
my $sub;
{
+ local $SIG{__DIE__} = 'DEFAULT';
local $SIG{__WARN__} = sub { die $_[0] };
$sub = eval $expr;
}
die "$0: cannot evaluate command for `late_interval' on\n $expr\nOutput: $@\n" if $@;
{
+ local $SIG{__DIE__} = 'DEFAULT';
local $SIG{__WARN__} = sub { die $_[0] };
eval '&$sub(3.1415926) + 0;';
}
@@ -2809,12 +3009,12 @@
}
$config_options->{find_times} = [ sort { $a <=> $b } @find_times ];
- # There must be at least one list of files.
+ # There must be at least one group.
unless (keys %$config_files) {
- die "$0: error: must define at least one `files' in `$config_filename'.\n";
+ die "$0: error: must define at least one `group' in `$config_filename'.\n";
}
- # For each files parameter there are required options. Convert the
+ # For each group there are required options. Convert the
# unblessed reference to a hash to a Orca::Config::FilesGroup
# object.
foreach my $files_key (keys %$config_files) {
@@ -2823,11 +3023,11 @@
foreach my $option (@cc_required_files) {
unless (defined $files_group->{$option}) {
- die "$0: error: must define `$option' for `files $files_key' in `$config_filename'.\n";
+ die "$0: error: must define `$option' for `group $files_key' in `$config_filename'.\n";
}
}
- # Optional files options will be set to '' here if they haven't
+ # Optional group options will be set to '' here if they haven't
# been set by the user.
foreach my $option (@cc_optional_files) {
unless (defined $files_group->{$option}) {
@@ -2841,15 +3041,15 @@
my $date_source = $files_group->{date_source}[0];
if ($date_source eq 'column_name') {
unless (@{$files_group->{date_source}} == 2) {
- die "$0: error: incorrect number of arguments for `date_source' for `files $files_key'.\n";
+ die "$0: error: incorrect number of arguments for `date_source' for `group $files_key'.\n";
}
unless (defined $files_group->{date_format}) {
- die "$0: error: must define `date_format' with `date_source columns ...' for `files $files_key'.\n";
+ die "$0: error: must define `date_format' with `date_source columns ...' for `group $files_key'.\n";
}
}
else {
unless ($date_source eq 'file_mtime') {
- die "$0: error: illegal argument for `date_source' for `files $files_key'.\n";
+ die "$0: error: illegal argument for `date_source' for `group $files_key'.\n";
}
}
$files_group->{date_source}[0] = $date_source;
@@ -2865,23 +3065,24 @@
# since this will match single character files and directories.
my $sub_dir = 0;
my %find_files;
- my $number_finds = @{$files_group->{find_files}};
+ my $number_finds = @{$files_group->{'find_files'}};
for (my $i=0; $i<$number_finds; ++$i) {
- my $orig_find = $files_group->{find_files}[$i];
+ my $orig_find = $files_group->{'find_files'}[$i];
my $find = $orig_find;
$find =~ s:^\\./::;
$find =~ s:/\\./:/:g;
$find = $orig_find unless $find;
- $files_group->{find_files}[$i] = $find;
+ $files_group->{'find_files'}[$i] = $find;
my $test_string = 'abcdefg';
+ local $SIG{__DIE__} = 'DEFAULT';
local $SIG{__WARN__} = sub { die $_[0] };
eval { $test_string =~ /$find/ };
- die "$0: error: illegal regular expression in `find_files $orig_find' for `files $files_key' in `$config_filename':\n$@\n" if $@;
+ die "$0: error: illegal regular expression in `find_files $orig_find' for `group $files_key' in `$config_filename':\n$@\n" if $@;
$find_files{$find} = 1;
$sub_dir = 1 if $find =~ m:\(.+\):;
}
- $files_group->{find_files} = [sort keys %find_files];
- $files_group->{sub_dir} = $sub_dir || $config_options->{sub_dir};
+ $files_group->{'find_files'} = [sort keys %find_files];
+ $files_group->{sub_dir} = $sub_dir || $config_options->{sub_dir};
}
# There must be at least one plot.
@@ -2918,6 +3119,16 @@
$plot->{plot_width} = 500 unless $plot->{plot_width};
$plot->{plot_height} = 125 unless $plot->{plot_height};
+ # Make sure the base is either 1000 or 1024.
+ if (defined $plot->{base} && length($plot->{base})) {
+ if ($plot->{base} != 1000 and $plot->{base} != 1024) {
+ die "$0: error: plot #$j must define base to be either 1000 or 1024.\n";
+ }
+ }
+ else {
+ $plot->{base} = 1000;
+ }
+
# Set the plot minimum and maximum values to U unless they are
# set.
unless (defined $plot->{data_min}) {
@@ -2950,13 +3161,13 @@
$plot->{data_type} = 'GAUGE';
}
- # The data source needs to be a valid files key.
+ # The data source needs to be a valid group key.
my $source = $plot->{source};
unless (defined $config_files->{$source}) {
- die "$0: error: plot #$j `source $source' references non-existant `files' in `$config_filename'.\n";
+ die "$0: error: plot #$j `source $source' references non-existant `group' in `$config_filename'.\n";
}
unless ($plot->{source}) {
- die "$0: error: plot #$j `source $source' requires one files_key argument in `$config_filename'.\n";
+ die "$0: error: plot #$j `source $source' requires one group argument in `$config_filename'.\n";
}
# Set the legends of any columns not defined.
@@ -2973,7 +3184,7 @@
$plot->{color} = [];
}
for (my $k=@{$plot->{color}}; $k<$number_datas; ++$k) {
- $plot->{color}[$k] = $::cc_default_colors[$k];
+ $plot->{color}[$k] = get_color($k);
}
# Check each line type setting.
@@ -3084,14 +3295,14 @@
$pcl_files_key = '';
$pcl_plot_index = '-0';
@pcl_option_elements = qw(base_dir
- data_dir
- expire_gifs
+ expire_images
find_times
html_dir
html_page_footer
html_page_header
html_top_title
late_interval
+ rrd_dir
state_file
sub_dir
warn_email);
@@ -3101,14 +3312,17 @@
find_files
interval
reopen);
- @pcl_plot_elements = qw(color
+ @pcl_plot_elements = qw(base
+ color
data
data_min
data_max
data_type
flush_regexps
+ href
legend
line_type
+ logarithmic
optional
plot_height
plot_min
@@ -3122,11 +3336,12 @@
data
legend
line_type);
- @pcl_filepath_elements = qw(data_dir
- find_files
+ @pcl_filepath_elements = qw(find_files
html_dir
+ rrd_dir
state_file);
@pcl_no_arg_elements = qw(flush_regexps
+ logarithmic
optional
rigid_min_max);
@pcl_keep_as_array_options = qw();
@@ -3179,7 +3394,7 @@
return;
}
unless (grep {$key eq $_} @pcl_file_elements) {
- warn "$0: warning: directive `$key' unknown for files at line $line_number in `$config_filename'.\n";
+ warn "$0: warning: directive `$key' unknown for group at line $line_number in `$config_filename'.\n";
return;
}
@@ -3246,25 +3461,26 @@
}
# Take care of files to watch.
- if ($key eq 'files') {
+ if ($key eq 'group') {
unless (@line) {
- die "$0: error: files needs a files name followed by { at line $line_number in `$config_filename'.\n"
+ die "$0: error: group needs a group name followed by { at line $line_number in `$config_filename'.\n"
}
$pcl_files_key = shift(@line);
unless (@line == 1 and $line[0] eq '{' ) {
- warn "$0: warning: '{' required after 'files $pcl_files_key' at line $line_number in `$config_filename'.\n";
+ warn "$0: warning: '{' required after `group $pcl_files_key' at line $line_number in `$config_filename'.\n";
}
if (defined $config_files->{$pcl_files_key}) {
- warn "$0: warning: files `$key' at line $line_number in `$config_filename' previously defined.\n";
+ warn "$0: warning: `group $key' at line $line_number in `$config_filename' previously defined.\n";
}
return;
}
- # Take care of plots to make.
+ # Take care of plots to make. Include in each plot its index.
if ($key eq 'plot') {
$pcl_plot_index =~ s:^-::;
+ $config_plots->[$pcl_plot_index]{_index} = $pcl_plot_index;
unless (@line == 1 and $line[0] eq '{') {
- warn "$0: warning: '{' required after 'plot' at line $line_number in `$config_filename'.\n";
+ warn "$0: warning: '{' required after `plot' at line $line_number in `$config_filename'.\n";
}
return;
}
@@ -3325,11 +3541,11 @@
=head1 NAME
-orca - Make HTML & GIF plots of daily, weekly, monthly & yearly data
+orca - Make HTML & PNG plots of daily, weekly, monthly & yearly data
=head1 SYNOPSIS
- orca [-o] [-r] [-v [-v [-v]]] configuration_file
+ orca [-gifs] [-o] [-r] [-v [-v [-v]]] configuration_file
=head1 DESCRIPTION
@@ -3343,7 +3559,7 @@
* Remembers the last modification times for files so they do not have to
be reread continuously.
* Can plot the same type of data from different files into different
- or the same GIFs.
+ or the same PNGs.
* Different plots can be created based on the filename.
* Parses the date from the text files.
* Create arbitrary plots of data from different columns.
@@ -3353,16 +3569,16 @@
ones, using one or more columns.
* Group multiple columns into a single plot using regular expressions on
the column titles.
- * Creates an HTML tree of HTML files and GIF plots.
+ * Creates an HTML tree of HTML files and PNG plots.
* Creates an index of URL links listing all available targets.
* Creates an index of URL links listing all different plot types.
* No separate CGI set up required.
* Can be run under cron or it can sleep itself waiting for file updates
based on when the file was last updated.
-Orca is based the RRD tool by Tobias Oetiker. While it is similar to the
-other tools based on RRD, such as Cricket and MRTG, it is significantly
-different. To see these other tools, examine
+Orca is similar to but substantially different from other tools that
+record and display hourly, daily, monthly, and yearly data, such as
+MRTG and Cricket. To see these other tools, examine
http://ee-staff.ethz.ch/~oetiker/webtools/mrtg/mrtg.html
@@ -3372,7 +3588,7 @@
=head1 EXAMPLES
-A small static example of Orca is at
+A static example of Orca is at
http://www.geocities.com/~bzking/orca-example/
@@ -3381,16 +3597,20 @@
=head1 COMMAND LINE OPTIONS
-Orca has only three command line options. They are:
+Orca has only four command line options. They are:
+
+B<-gifs>: GIFs. Tell Orca to generate GIFs instead of PNGs. You
+probably do not want this, since PNGs are 1/3 the size of GIFs and
+take less time to generate.
B<-o>: Once. This tells Orca to go through the steps of finding files,
-updating the RRDs, updating the GIFs, and creating the HTML files once.
+updating the RRDs, updating the PNGs, and creating the HTML files once.
Normally, Orca loops continuously looking for new and updated files.
B<-r>: RRD only. Have Orca only update its RRD files. Do not generate
-any HTML or GIF files. This is useful if you are loading in a large
+any HTML or PNG files. This is useful if you are loading in a large
amount of data in several invocations of Orca and do not want to create
-the HTML and GIF files in each run since it is time consuming.
+the HTML and PNG files in each run since it is time consuming.
B<-v>: Verbose. Have Orca spit out more verbose messages. As you add
more B<-v>'s to the command line, more messages are sent out. Any more
@@ -3404,12 +3624,13 @@
=head1 ARCHITECTURE ISSUES
Because Orca is extremely IO intensive, I recommend that the host that
-locally mounts the web server content be the same machine that runs Orca.
-In addition, the RRD data files that Orca uses also require a good amount
-of IO. The machine running Orca should always have the B<data_dir>
-directory locally mounted. It is more important this B<data_dir>
-be locally stored than B<html_dir> for performance concerns. The two
-options B<data_dir> and B<html_dir> are described in more detail below.
+locally mounts the RRD data files be the same machine that runs Orca.
+In addition, the HTML and image files that Orca creates also require a
+good amount of IO. The machine running Orca should always have the
+B<rrd_dir> directory locally mounted. For performance reasons, it is
+more important that B<rrd_dir> be stored locally than B<html_dir>.
+The two options B<html_dir> and B<rrd_dir> are described in more detail
+below.
=head1 INSTALLATION AND CONFIGURATION
@@ -3417,7 +3638,7 @@
instructs Orca on what to do. The configuration file is based on a
key/value pair structure. The key name must start at the beginning of
a line. Lines that begin with whitespace are concatenated onto the last
-key's value. This is the same format as used by MRTG and Cricket.
+key's value.
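As a minimal illustration of this continuation rule (the email addresses
here are hypothetical), the following two lines form a single
B<warn_email> value because the second line begins with whitespace:

    warn_email admin@example.com
               backup@example.com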
There are three main groups of options in an Orca config: general options,
file specific options, and plot specific options. General options may
@@ -3444,7 +3665,7 @@
=item B<html_dir> I<directory>
B<html_dir> specifies the root directory for the main index.html and
-all underlying HTML and GIF files that Orca generates. This should
+all underlying HTML and PNG files that Orca generates. This should
not be a directory that normal users will edit. Ideally this directory
should be on a disk locally attached to the host running Orca, but this
is not necessary.
@@ -3452,27 +3673,27 @@
If I<directory> does not begin with a / and the B<base_dir> option was
set, then the B<base_dir> directory will be prepended to I<directory>.
-=item B<data_dir> I<directory>
+=item B<rrd_dir> I<directory>
-B<data_dir> specifies the root directory for the location of the RRD data
+B<rrd_dir> specifies the root directory for the location of the RRD data
files that Orca generates. For best performance, this directory should
be on a disk locally attached to the host running Orca. Otherwise,
the many IO operations that Orca performs will be greatly slowed down.
-It is more important this B<data_dir> be locally stored than B<html_dir>
+It is more important that B<rrd_dir> be stored locally than B<html_dir>
for performance reasons.
If I<directory> does not begin with a / and the B<base_dir> option was
set, then the B<base_dir> directory will be prepended to I<directory>.
-If B<data_dir> is not defined, then B<base_dir> will be used as B<data_dir>.
-Orca will quit with an error if both B<data_dir> and B<base_dir> are
+If B<rrd_dir> is not defined, then B<base_dir> will be used as B<rrd_dir>.
+Orca will quit with an error if neither B<rrd_dir> nor B<base_dir> is
set.
=item B<base_dir> I<directory>
If B<base_dir> is set, then it is used to prepend to any file or directory
based names that do not begin with /. These are currently B<state_file>,
-B<html_dir>, B<data_dir>, and the B<find_files> option in the B<files>
+B<html_dir>, B<rrd_dir>, and the B<find_files> option in the B<group>
options.
=head2 Optional General Options
@@ -3480,7 +3701,7 @@
=item B<late_interval> I<Perl expression>
B<late_interval> is used to calculate the time interval between a
-files last modification time and the time when that file is considered
+file's last modification time and the time when that file is considered
to be late for an update. In this case, an email message may be sent
out using the B<warn_email> addresses. Because different input files
may be updated at different rates, B<late_interval> takes an arbitrary
@@ -3509,10 +3730,10 @@
By default, nobody is emailed.
-=item B<expire_gifs> 1
+=item B<expire_images> 1
-If B<expire_gifs> is set then .meta files will be created for all
-generated GIF files. If the Apache web server 1.3.2 or greater is being
+If B<expire_images> is set then .meta files will be created for all
+generated PNG files. If the Apache web server 1.3.2 or greater is being
used, then the following modifications must be added to srm.conf or
httpd.conf.
@@ -3527,7 +3748,7 @@
---
> MetaSuffix .meta
-By default, expiring the GIF files is not enabled.
+By default, expiration of images is not enabled.
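Although the exact contents are not shown here, a .meta file in this
scheme would normally carry the extra HTTP header that Apache's
MetaSuffix mechanism sends alongside the image, i.e. something along
the lines of (illustrative value only):

    Expires: Thu, 14 Oct 1999 15:00:00 GMT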
=item B<find_times> I<hours:minutes> [I<hours:minutes> ...]
@@ -3565,16 +3786,16 @@
sub_dir 1
-=head2 Files Options
+=head2 Group Options
The next step in configuring Orca is telling where to find the files to
use as input, a description of the columns of data comprising the file,
the interval at which the file is updated, and where the measurement
-time is stored in the file. This is stored into a files set.
+time is stored in the file. All of this is specified in a group.
-A generic example of the files set and its options are:
+A generic example of a group and its options is:
- files FILES_KEY1 {
+ group GROUP_NAME1 {
find_files filename1 filename2 ...
column_description column1_name column2_name ...
date_source file_mtime
@@ -3584,19 +3805,19 @@
.
}
- files FILES_KEY2 {
+ group GROUP_NAME2 {
.
.
}
-The key for a files set, in this example FILES_KEY1 and FILE_KEY2, is a
+The key for a group, in this example GROUP_NAME1 and GROUP_NAME2, is a
descriptive name that is unique for all files and is used later when the
plots to create are defined. Files that share the same general format
-of column data may be grouped under the same files key. The options
-for a particular files set must be enclosed in the curly brackets {}'s.
-An unlimited number of file sets may be listed.
+of column data may be grouped together. The options
+for a particular group must be enclosed in curly brackets {}.
+An unlimited number of groups may be listed.
-=head2 Required Files Options
+=head2 Required Group Options
=item B<find_files> I<path|regexp> [I<path|regexp> ...]
@@ -3621,14 +3842,14 @@
source1 is data from one place and source2 is data from another place,
then Orca needs to be told to treat the data from each file as distinct
data sources. This can be accomplished in two ways. The first is by creating
-another files { ... } option set. However, this requires copying all
+another group { ... } set. However, this requires copying all
of the text and makes maintenance of the configuration file complex.
The second and recommended approach is to place ()'s around parts of the
regular expression to tell Orca how to distinguish the two data files:
find_files /data/(source\d)
-This creates two "groups", one named source1 and the other named source2
+This creates two groups, one named source1 and the other named source2
which will be plotted separately. One more example:
find_files /data/solaris.*/(.*)/percol-\d{4}-\d{2}-\d{2}
@@ -3643,14 +3864,14 @@
and treat the files in the olympia and sunridge directories as distinct,
but the files within each directory as from the same data source.
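Under the hood this grouping is plain Perl pattern matching; the sketch
below (not Orca's actual code) shows how the () capture from the first
example above yields the name that separates the two data sources:

    # Sketch only: the parenthesized capture in the find_files pattern
    # provides the group name for each matching file.
    for my $file ('/data/source1', '/data/source2') {
        if ($file =~ m{/data/(source\d)}) {
            print "$file -> group $1\n";   # source1, then source2
        }
    }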
-If any of the paths or regular expressions given to B<find_Files> do not
+If any of the paths or regular expressions given to B<find_files> do not
begin with a / and the B<base_dir> option was set, then the B<base_dir>
directory will be prepended to the path or regular expression.
=item B<interval> I<seconds>
The B<interval> option takes the number of seconds between updates for
-the input data files listed in this files set.
+the input data files listed in this group.
=item B<column_description> I<column_name> [I<column_name> ...]
@@ -3659,7 +3880,7 @@
For Orca to plot the data, it needs to be told what each column of
data holds. This is accomplished by creating a text description for
each column. There are two ways this may be loaded into Orca. If the
-input data files for a files set do not change, then the column names
+input data files for a group do not change, then the column names
can be listed after B<column_description>:
column_description date in_packets/s out_packets/s
@@ -3671,7 +3892,7 @@
This informs Orca that it should read the first line of all the input
data files for the column description. Orca can handle different files
-in the same files set that have different number of columns and column
+in the same group that have different numbers of columns and column
descriptions. The only limitation here is that column descriptions
are white space separated and therefore, no spaces are allowed in the
column descriptions.
@@ -3696,7 +3917,7 @@
=item B<reopen> 1
-Using the B<reopen> option for a files set instructs Orca to close
+Using the B<reopen> option for a group instructs Orca to close
and reopen any input data files when there is new data to be read.
This is of most use when an input data file is erased and rewritten by
some other process.
@@ -3708,7 +3929,7 @@
plot {
title Plot title
- source FILES_KEY1
+ source GROUP_NAME1
data column_name1
data 1024 * column_name2 + column_name3
legend First column
@@ -3720,7 +3941,7 @@
.
}
-Unlike the files set, there is no key for generating a plot. An unlimited
+Unlike the group, there is no key for generating a plot. An unlimited
number of plots can be created.
Some of the plot options if they have the two characters %g or %G
@@ -3737,24 +3958,24 @@
=head2 Required Plot Options
-=item B<source> I<files_key>
+=item B<source> I<group_name>
-The B<source> argument should be a single key name for a files set from
-which data will be plotted. Currently, only data from a single files
-set may be put into a single plot.
+The B<source> argument should be a single group name from which data will
+be plotted. Currently, only data from a single group may be put into a
+single plot.
=item B<data> I<Perl expression>
=item B<data> I<regular expression>
The B<data> plot option tells Orca the data sources to use to place
-in a single GIF plot. At least one B<data> option is required for a
+in a single PNG plot. At least one B<data> option is required for a
particular plot and as many as needed may be placed into a single plot.
Two forms of arguments to B<data> are allowed. The first form
allows arbitrary Perl expressions, including mathematical expressions,
that result in a number as a data source to plot. The expression may
-contain the names of the columns as found in the files set given to the
+contain the names of the columns as found in the group given to the
B<source> option. The column names must be separated with white space
from any other characters in the expression. For example, if you have
number of bytes per second input and output and you want to plot the
@@ -3774,7 +3995,7 @@
the portion of the match in the ()'s is placed into the normal Perl $1,
$2, etc variables. Take the following configuration for example:
- files throughput {
+ group throughput {
find_files /data/solaris.*/(.*)/percol-\d{4}-\d{2}-\d{2}
column_description hme0Ipkt/s hme0Opkt/s
hme1Ipkt/s hme1Opkt/s
@@ -3934,17 +4155,23 @@
in the options for a particular plot.
-=head2 GIF Plot Plotting Options
+=head2 PNG Plot Plotting Options
+
+=item B<base> I<number>
+
+If memory is being plotted (and not network traffic), this value should
+be set to 1024 so that one kilobyte is 1024 bytes. For traffic
+measurements, 1 kilobit/s is 1000 bits/s. By default, a base of 1000 is
+used.
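As a sketch only (the column name free_mem_bytes is made up for
illustration), a plot of memory data would select the 1024 base like
this:

    plot {
    source GROUP_NAME1
    data free_mem_bytes
    legend Free memory
    base 1024
    }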
=item B<plot_width> I<number>
Using the B<plot_width> option specifies how many pixels wide the drawing
-area inside the GIF is.
+area inside the PNG is.
=item B<plot_height> I<number>
Using the B<plot_height> option specifies how many pixels high the
-drawing area inside the GIF is.
+drawing area inside the PNG is.
=item B<plot_min> I<number>
@@ -3958,26 +4185,31 @@
By default this will be auto-configured from the data you select with
the graphing functions.
-=item B<rigid_min_max> 1
+=item B<rigid_min_max>
Normally Orca will automatically expand the lower and upper limit if
the graph contains a value outside the valid range. By setting the
B<rigid_min_max> option, this is disabled.
+=item B<logarithmic>
+
+Normally Orca will use a linear scale for the Y axis. If a plot contains
+this option, then a logarithmic scale will be used.
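For instance, a plot whose values span several orders of magnitude might
combine these two no-argument options with B<plot_min> (the data line
here is illustrative):

    plot {
    source GROUP_NAME1
    data column_name1
    plot_min 1
    rigid_min_max
    logarithmic
    }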
+
=item B<title> <text>
Setting the B<title> option sets the title of the plot. If you place
%g or %G in the title, it is replaced with the text matched by any
-()'s in the files set B<find_files> option. %g gets replaced with the
+()'s in the group B<find_files> option. %g gets replaced with the
exact text matched by the ()'s and %G is replaced with the same text,
except the first character is capitalized.
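For instance, with the earlier B<find_files> pattern whose ()'s matched
the directory name olympia, a title written as

    title %G Interface Bits Per Second

would be rendered as "Olympia Interface Bits Per Second" for that group
(the title wording itself is only an illustration).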
=item B<y_legend> <text>
Setting B<y_legend> sets the text to be displayed along the Y axis of
-the GIF plot.
+the PNG plot.
-=head2 Multiple GIF Plot Ploting Options
+=head2 Multiple PNG Plot Plotting Options
The following options should be specified multiple times for each data
source in the plot.
@@ -3995,7 +4227,7 @@
=item B<legend> I<text>
The B<legend> option specifies for a single data source the comment that
-is placed below the GIF plot.
+is placed below the PNG plot.
=head1 MAILING LISTS
Modified: trunk/orca/CHANGES
==============================================================================
--- trunk/orca/CHANGES (original)
+++ trunk/orca/CHANGES Sat Jul 13 19:22:26 2002
@@ -1,3 +1,381 @@
+Thu Oct 14 14:44:00 PDT 1999
+
+ Release Orca 0.24.
+
+Thu Oct 14 12:17:29 PDT 1999
+
+ Back down to RRDtool 1.0.7 with only my filename length patch
+ applied per Tobi.
+
+ Revamp the INSTALL, NEWS, and other files to get a release out.
+
+ Up the version number of the included RRDs package to 1.000072
+ since it contains some patches specifically for Orca.
+
+Wed Oct 13 12:40:29 PDT 1999
+
+ Orcallator.se 1.19 prevents a division by zero in calculating
+ the mean_disk_busy if the number of disks on the system is 0.
+
+ Move the URL http://www.geocities.com/~bzking/orcallator-docs/
+ to http://www.geocities.com/~bzking/docs/orcallator.html.
+
+ In cases where there are a large number of data sources in a
+ single image, the list of available colors can be exhausted.
+ Now the colors are reused.
+
+ Make sure plots are displayed in each subgroup's HTML files in
+ the order they appear in the configuration file.
+
+Tue Oct 12 10:27:44 PDT 1999
+
+ Update orcallator.cfg to remove the maximum and average disk busy
+ plot and add a plot that displays the run percent for each disk.
+
+ Orcallator 1.18 now renames disk_runp.c?t?d? to disk_runp_c?t?d?.
+
+Mon Oct 11 14:31:38 PDT 1999
+
+ Update orcallator.cfg to plot the disk space and inode usage
+ for all locally mounted filesystems.
+
+ Apply a patch to RRDtool that increases the maximum filename
+ length for generated image files from 255 to 1024.
+
+ Rename &strip_key_name to &escape_name.
+
+Fri Oct 8 14:23:37 PDT 1999
+
+ Save even more space in Orca generated HTML and image filenames.
+ On large directories, up to 10% disk space can be saved alone
+ in filenames according to du. Also, if generated filenames are
+ longer than 235 characters, then compute a MD5 hash of the name,
+ trim the name down to 210 characters, and append the MD5 hash.
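A minimal sketch of that naming scheme (this is not the actual Orca
routine and the subroutine name is made up), using the bundled
Digest::MD5 module:

    use Digest::MD5 qw(md5_hex);

    # Sketch only: keep a generated filename under the length limit while
    # preserving uniqueness by appending an MD5 hash of the full name.
    sub shorten_generated_name {
        my ($name) = @_;
        return $name if length($name) <= 235;
        return substr($name, 0, 210) . md5_hex($name);
    }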
+
+ Orcallator.se 1.17 now skips locally mounted /cdrom filesystems.
+
+ Increase the number of default known colors to 11.
+
+Wed Oct 6 17:59:05 PDT 1999
+
+ Orcallator.se 1.16 now compresses its log files after it has
+ completed a day's log using the command in the COMPRESSOR
+ environmental variable. start_orcallator now passes this
+ environmental variable to orcallator.se.
+
+Wed Oct 6 15:19:09 PDT 1999
+
+ Have configure.in look for bzip2, bunzip2, gzip, gunzip, compress,
+ and uncompress. Define the COMPRESSOR and UNCOMPRESSOR_PIPE as
+ the best compression tool to use and add these names to the list
+ of names being substituted. These only get defined if both a
+ compression and an uncompression tool are found.
+
+ To keep backwards compatibility in orcallator.se, define WATCH_WEB
+ if WATCH_HTTPD is defined.
+
+Tue Oct 5 14:54:59 PDT 1999
+
+ Update orcallator.se to 1.15. kvm$mpid is an int, not a long.
+ This caused problems on Solaris 7 hosts running a 64 bit kernel.
+
+Fri Oct 1 12:15:47 PDT 1999
+
+ Update orcallator.se to 1.14. Rename disk.c?t?d? column names
+ to disk_runp.c?t?d? to better reflect the data being recorded
+ and to allow for more per disk information later.
+
+ Install the latest RRDtool patch.
+
+Thu Sep 29 19:19:23 PDT 1999
+
+ Add some more space between a plot's legend and the minimum,
+ maximum, etc information for a particular piece of data.
+
+ Allow mathematical expressions in plots where the first data
+ contains a regular expression.
+
+Fri Sep 24 11:50:33 PDT 1999
+
+ Many fixes and improvements to orcallator.se. Fix a bug in the
+ disk_mean calculation where it was being divided by the wrong
+ disk_count. Now it should be much larger and in scale with
+ disk_peak. When WATCH_DISK is defined, now print each disk's
+ run percent. Add a new define WATCH_MOUNTS, which reports each
+ mount point's disk space and inode capacity, usage, available
+ for non-root users and percent used. This comes from Duncan
+ Lawie tyger at hoopoes.com. Add some smarts so that if the number
+ of interfaces, physical disk, or mounted partitions changes,
+ then a new header is printed. This will prevent column name
+ and data mixups when the system configuration changes.
+
+Mon Sep 20 11:18:21 PDT 1999
+
+ Include Storable 0.6.5.
+
+Tue Sep 14 15:54:38 PDT 1999
+
+ Add the page scan rate to orcallator.se as column scanrate.
+
+Tue Sep 7 12:21:52 PDT 1999
+
+ Update the archive URLs for the orca-* mailing lists.
+
+Thu Sep 2 11:03:19 PDT 1999
+
+ Include and require RRDtool 1.0.7.
+
+ Make some more minor changes to have Orca run with Perl 5.004_01.
+
+Tue Aug 24 11:32:34 PDT 1999
+
+ Include and require Math::Interpolate 1.05.
+
+Thu Aug 19 14:13:16 PDT 1999
+
+ No longer include Compress::Zlib, which was to be used to read in
+ compressed source data files without spawning a separate process.
+ Now it seems easier to just call gunzip -c.
+
+Wed Aug 18 11:10:21 PDT 1999
+
+ Read in .gz files using gunzip, .Z files using uncompress,
+ and .bz2 files with bunzip2.
+
+ Clean up some bugs where the RRDs::graph -r option would always
+ be used.
+
+ Plots can now take a logarithmic option which specifies that
+ the Y axis will have a logarithmic scale.
+
+Tue Aug 17 11:50:34 PDT 1999
+
+ Include and require Math::Interpolate 1.04.
+
+Mon Aug 16 12:11:48 PDT 1999
+
+ Add locking on a particular configuration file. Make a directory
+ using the configuration filename.
+
+ Include and require RRDtool 1.0.6.
+
+Fri Aug 13 17:10:23 PDT 1999
+
+ Install librrd.so on Solaris hosts in $libdir so that
+ orcallator.se can attach to it.
+
+Fri Aug 13 15:35:21 PDT 1999
+
+ Include and require RRDtool 1.0.5.
+
+Mon Aug 9 10:32:42 PDT 1999
+
+ Include and require RRDtool 1.0.4.
+
+Fri Aug 6 09:29:14 PDT 1999
+
+ Include Digest::MD5 2.09 but continue to only require 2.00.
+
+Thu Aug 5 20:58:32 PDT 1999
+
+ Make sure start_orcallator looks to see if SE is defined. If it
+ is not, then print a message stating where to find the SE toolkit.
+
+Fri Jul 30 09:12:20 PDT 1999
+
+ Update orcallator.cfg to plot the new process spawn rate.
+
+ Include and require RRDtool 1.0.3.
+
+Thu Jul 29 10:36:20 PDT 1999
+
+ To fix the problem of too long HTML and image file names,
+ shorten the names as they get passed through strip_key_name.
+ Now orcallator -> o, orca -> o, _times_ -> _X_, _percent_ ->
+ _pct_, _number_ -> _num_. The upgrade_installation will perform
+ these renames on a list of directories.
+
+ Since orcallator is only one of many different data gatherers,
+ move it into its own directory.
+
+Wed Jul 28 10:10:53 PDT 1999
+
+ If WATCH_CPU is defined and if the user can read /dev/kmem then
+ orcallator.se will now measure the process spawn rate over a 5
+ minute interval and also record the maximum rate measured over
+ a 5 second interval.
+
+ Add a href field for a plot in the configuration file. This,
+ if defined, lets you make a particular target type be a HREF to
+ read something about that target type.
+
+ Include Digest::MD5 2.08 but continue to only require 2.00.
+
+Fri Jul 23 13:41:36 PDT 1999
+
+ Include and require RRDtool 1.0.1.
+
+Thu Jul 22 17:06:45 PDT 1999
+
+ Use a blessed reference to an array for the Orca::DataFile,
+ Orca::HTMLFile, Orca::ImageFile, Orca::OpenFileHash,
+ Orca::RRDFile, and Orca::OpenFileHash classes.
+
+Mon Jul 19 12:57:56 PDT 1999
+
+ On a fresh install make sure to mkdir RRD_DIR/orcallator.
+
+ Change the tag name data_dir to rrd_dir to make it clearer what
+ kind of data is being loaded.
+
+ Integrate building of Compress::Zlib into the Makefile structure.
+ Make use of the Zlib 1.1.3 library included with RRDtool for
+ Compress::Zlib.
+
+Fri Jul 16 11:12:02 PDT 1999
+
+ Include and require RRDtool 1.0.0 and Compress::Zlib 1.05.
+
+ Update the colors in orcallator.cfg. In all plots, make one
+ plot use area.
+
+Thu Jul 15 11:35:52 PDT 1999
+
+ As noted by Bob Hoekstra <Bob_Hoekstra at merck.com>, orcallator.se
+ was outputting the same information for ??load as for ??runq.
+ Orcallator.se no longer generates that data and orcallator.cfg
+ no longer generates plots for it.
+
+Wed Jul 14 11:04:09 PDT 1999
+
+ Switch to use PNGs instead of GIFs. They take up 1/3 less space
+ than GIFs and at least 10% less time to generate.
+
+ Revamp the INSTALL document to reflect the new * -> _times_
+ change and add some documentation of the file descriptor limit.
+
+ Uncompress and read .Z and .gz compressed input files on the fly.
+
+Wed Jul 14 10:22:13 PDT 1999
+
+ Include and require RRDtool 0.99.52.
+
+Tue Jul 13 12:34:29 PDT 1999
+
+ Include Storable 0.604 but continue to only require 0.603.
+
+Thu Jul 8 16:22:45 PDT 1999
+
+ Include and require RRDtool 0.99.50.
+
+Wed Jul 7 13:26:55 PDT 1999
+
+ Rename the migrate_to_orcallator script to upgrade_installation.
+
+ Per Vadim Shulkin <vadim.shulkin at csfb.com> request, replace the
+ * in all generated filenames with _times_. Put this rename in
+ the new upgrade_installation script.
+
+Tue Jul 6 18:14:20 PDT 1999
+
+ Include and require RRDtool 0.99.49.
+
+Sun Jul 4 16:57:28 PDT 1999
+
+ Include and require RRDtool 0.99.47.
+
+Fri Jul 2 18:16:21 PDT 1999
+
+ Include and require RRDtool 0.99.46.
+
+Tue Jun 29 13:18:57 PDT 1999
+
+ Add a new option for each plot named base that allows the base
+ for each plot to be either 1000 or 1024. This allows for plotting
+ of memory or network types of data.
+
+ Use the new base option in the number of bytes of available swap
+ space plots.
+
+ Include and require RRDtool 0.99.45.
+
+ Make use of RRDtool's 0.99.45 capability to place the SI number
+ suffix to numbers appearing in the legend using the GPRINT
+ command.
+
+Mon Jun 28 11:16:46 PDT 1999
+
+ Now only print found filenames if the verbose level is above one.
+ Orca was generating too much output.
+
+ When creating all the HTML files, do not write directly to the
+ new filename, instead write to a temporary filename and when
+ the file is closed, then rename it to the final name. This lets
+ the user view existing HTML files even when the new HTML files
+ are being updated.
+
+ Include RRDtool 0.99.41 but continue to only require 0.99.29.
+
+Tue Jun 22 10:54:39 PDT 1999
+
+ Include RRDtool 0.99.40 but continue to only require 0.99.29.
+
+Thu Jun 10 09:39:36 PDT 1999
+
+ When 80% of a host's processes are web serving processes, plotting
+ the total system and number of httpd's together on the same plot
+ makes sense. However, for people running a tiny web server with
+ only a few processes, this does not make sense, so a separate
+ plot is created. Noted by Paul Company <paul.company at plpt.com>.
+
+Tue Jun 8 10:57:39 PDT 1999
+
+ Make the table of contents in INSTALL consistent with the contents
+ of the file.
+
+Mon Jun 7 13:01:05 PDT 1999
+
+ Include RRDtool 0.99.32 but continue to only require 0.99.29.
+
+Thu Jun 3 13:27:09 PDT 1999
+
+ Fix a bug in Orca::SourceDataFile::add_plots where it was
+ dereferencing an undef.
+
+ Since orcallator.se may watch many different types of web
+ server logs, move some of the common code into CPP defines.
+ Defines are preferred over functions since the function call
+ overhead in SE is larger than having the code inlined.
+
+ Have orcallator.se watch Yahoo! style web access logs if
+ -DWATCH_YAHOO is passed on the command line.
+
+ If any of the environmental variables used by orcallator.se were
+ defined then the value was used, even if the length was zero.
+ Now the length of the variable must be non-zero for the value
+ to be used, otherwise the default value is used.
+
+Wed Jun 2 12:11:44 PDT 1999
+
+ Optimize orcallator.se slightly when it determines the size bin
+ for a particular file served.
+
+ Change the define WATCH_HTTPD in orcallator.se to WATCH_WEB to
+ be consistent with the other WEB_* defines and environmental
+ variables.
+
+Tue Jun 1 21:53:15 PDT 1999
+
+ For orcallator.se, if the WEB_SERVER environmental variable
+ is set, then the value of of WEB_SERVER is used to count the
+ number of web server processes. If WEB_SERVER is not defined
+ it defaults to httpd.
+
+Fri May 28 17:39:36 PDT 1999
+
+ Add to the plot title the type of the plot, i.e. 'Yearly',
+ in every GIF. Requested by Tom Murray <murray at reston.wcom.net>.
+
Thu May 27 10:53:02 PDT 1999
Release version 0.23.
@@ -25,11 +403,11 @@
Include RRDtool 0.99.31 but continue to only require 0.99.29.
- Add a new quarterly plot that shows the last 100 days. This is
+ Add a new quarterly plot that shows the last 100 days. This is
a nice transition between the monthly and yearly plots.
Change the number of days shown in the yearly plot from 500 to
- 428, which is one year and two months. Reducing the number
+ 428, which is one year and two months. Reducing the number
of days from 500 by at least one day will speed up the GIF
generation time because by default the plot portion of the GIFs
are 500 pixels wide.
@@ -44,24 +422,29 @@
Put a closing ) in a error message in RRDtool 0.99.29.1.
- Fix a bug in queue_data where it was sending data to
- rrd_update that was already in the RRD file.
+ Fix a bug in queue_data where it was sending data to rrd_update
+ that was already in the RRD file.
Wed May 19 10:43:48 PDT 1999
- Restructured the code so each Orca::SourceDataFile has only
- one anonymous subroutine to read in all the valued from a single
+ Remove the load_state and save_state subroutines and replace
+ them with an object-oriented Orca::State class for saving
+ information between Orca invocations. Use the Storable class
+ to save information instead of a text based method.
+
+ Restructured the code so each Orca::SourceDataFile has only one
+ anonymous subroutine to read in all the values from a single
line of the source data file. This sped Orca up slightly.
- Add a -r option to Orca to have it not create any HTML or
- GIF files and only update the RRD files.
+ Add a -r option to Orca to have it not create any HTML or GIF
+ files and only update the RRD files.
- Fix a bug where if the number of columns changed in from one
- source file to the next source file in a files group, the
- column index used to get a particular value is the old index
- for the old file and not the new index for the new file. This
- fix involved having the Orca::SourceDataFile object run the
- anonymous subroutines to pick the correct data from a line
+ Fix a bug where if the number of columns changed from
+ one source file to the next source file in a files group,
+ the column index used to get a particular value was the old
+ index for the old file and not the new index for the new file.
+ This fix involved having the Orca::SourceDataFile object run
+ the anonymous subroutines to pick the correct data from a line
instead of Orca::RRDFile.
Remove some unused methods: Orca::OpenFileHash::list,
@@ -107,7 +490,7 @@
The paths used to find input files are now passed through
the following regular expressions: s:^\\./:: and s:/\\./:/:g.
- These remove unneccessary searches through the current directory.
+ These remove unnecessary searches through the current directory.
Perl_glob will only return found files and no other type.
It will also not follow any directories named `..'.
Added: trunk/orca/docs/manual.html
==============================================================================
--- trunk/orca/docs/manual.html (original)
+++ trunk/orca/docs/manual.html Sat Jul 13 19:22:26 2002
@@ -0,0 +1,98 @@
+CPU Utilization doesn't break out on a multiprocessor system.
+In fact, there is no explanation of how it works on such a platform.
+I guess I'll look at the source.
+I'm assuming it aggregates all processors.
+
+--pjc
+
+And none of them give a small paragraph description
+of the 33 Data Sets.
+
+As I mentioned before, most of these are self-explanatory,
+but some need more explanation. For example,
+ how large is a packet &/or segment (I know they're variable length),
+ what is a Nocanput Rate,
+ what is the unit, the y-axis (Disk Busy Measure) on Peak & Mean Disk Busy
+ ...
+ I'm looking them up in Adrian's book, but it would be nice if they
+ were included in the documentation for orcallator.
+
+ Average # Processes in Run Queue
+ System Load
+ CPU Usage
+ Number of System & Httpd Processes
+ Web Server Hit Rate
+ Web Server File Size
+ Web Server Data Transfer Rate
+ Web Server HTTP Error Rate
+ Bits Per Second: <interface>
+ Packets Per Second: <interface>
+ Errors Per Second: <interface>
+ Ethernet Nocanput Rate
+ Ethernet Deferred Packet Rate
+ Ethernet Collisions
+ TCP Bits Per Second
+ TCP Segments Per Second
+ TCP Retransmission & Duplicate Received Percentage
+ TCP New Connection Rate
+ TCP Number Open Connections
+ TCP Reset Rate
+ TCP Attempt Fail Rate
+ TCP Listen Drop Rate
+ Sleeps on Mutex Rate
+ NFS Call Rate
+ NFS Timeouts & Bad Transmits Rate
+ Peak & Mean Disk Busy
+ Cache Hit Percentages
+ Cache Reference Rate
+ Inode Steal Rate
+ Available Swap Space
+ Page Residence Time
+ Page Usage
+ Pages Locked & IO
+
+-----Original Message-----
+From: Blair Zajac [mailto:bzajac at geostaff.com]
+Sent: Wednesday, June 09, 1999 10:45 AM
+To: Company, Paul
+Subject: Re: Orca observations
+
+
+Hello Paul,
+
+True on the first observation, not always true on the bits per second
+depending on the OS version and the support for the different
+Ethernet port types. On 2.6 or greater, I believe all hme, le, etc
+measure bits per second, but on older OSes, you cannot get these
+on all Ethernet types.
+
+What do you mean by "No definitions for Data Sets"? Is this missing
+from the Orca manual?
+
+Thanks for the notes, I'll put them in the next release of Orca.
+
+Blair
+
+
+"Company, Paul" wrote:
+>
+> Observations on Solaris 2.x:
+>
+> + Average # Processes in Run Queue
+> and
+> System Load
+> are identical (y axis the same - should they be, no!)
+> + Bits Per Second does not work, although the Packets Per Second
+> does.
+> + No definitions for Data Sets
+>
+> File this stuff away or not.
+> Just thought you might want this data.
+> I'll send you the fixes if I find them.
+>
+> --pjc
+>
+>
+
Added: trunk/orca/docs/FAQ
==============================================================================
--- trunk/orca/docs/FAQ (original)
+++ trunk/orca/docs/FAQ Sat Jul 13 19:22:26 2002
@@ -0,0 +1,7 @@
+Orcallator:
+ 1) Why are my Ethernet bits/second measurements all 0?
+
+ On 2.5.1 or older Solaris operating system releases, the kernel does
+ not measure the bits/second going through a particular device. I
+ believe some later kernel and device driver patches may fix this,
+ but Solaris 2.6 and greater definitely does measure this.
Modified: trunk/orca/NEWS
==============================================================================
--- trunk/orca/NEWS (original)
+++ trunk/orca/NEWS Sat Jul 13 19:22:26 2002
@@ -1,9 +1,122 @@
-New in version 0.23.
+New in Orca version 0.24.
+ 1) Installation notes. Due to the way Orca generated image and HTML
+ files are named, read the INSTALL file. Otherwise, you will have
+ some left over cruft that will waste disk space and you will have
+ to reload all of your source data files.
+ 2) Orca now runs under older Perls: Perl 5.004_01 or later.
+ 3) Switch to generate PNGs instead of GIFs. They take up 1/3 less disk
+ space and are created at least 10% faster. If you want Orca to
+ generate GIFs instead of PNGs, give it the -gifs flag.
+ 4) Read in .gz files using gunzip, .Z files using uncompress, and .bz2
+ files with bunzip2.
+ 5) Add to the plot title the type of the plot (i.e. 'Yearly') in
+ every image.
+ 6) Add a href field for a plot. This, if defined, appends a HREF to a
+ target name letting you point the user to a page to get more
+ information. The default orcallator.cfg has these added to point
+ to the new documentation web page
+ http://www.geocities.com/~bzking/docs/orcallator.html for all
+ orcallator.se recorded data.
+ 7) Add a new option named base that lets the user specify either a base
+ of 1000 or 1024 for autoscaling plots. This is useful for memory
+ (base 1024) vs network (base 1000) measurements and is used to
+ correctly calculate the scaling of the Y axis.
+ 8) The word logarithmic can now be used for a plot to create a
+ logarithmic Y axis scale.
+ 9) Orca no longer lists all the source files it finds to shorten verbose
+ output. This output is now generated if the verbose level is greater
+ than one.
+10) Do not overwrite existing HTML files when creating new versions until
+ the new version is finished. This allows people to better view
+ existing pages until the new page is completely finished.
+11) All generated HTML and image filenames are now created using
+ a different set of mappings. Now
+ orcallator -> o
+ orca -> o
+ _times_ -> _X_
+ _percent_ -> _pct_
+ _number_ -> _num_.
+ All older installations of Orca will need to be renamed unless you
+ want to load in all your data again. You can perform this renaming
+ on your files by running make upgrade, or if you have directories
+ that are not normally covered by the Orca install, run
+ src/upgrade_installation followed by the list of directories to search
+ in. Pass the -n flag to upgrade_installation if you want to see
+ the renames that will be performed without actually performing them.
+12) New HTML and image filenames are shorter, which can save 10% in
+ disk space on a large installation according to du -k. Also, now
+ plots containing arbitrarily many data sources can be generated without
+ exceeding the maximum filename length.
+13) Add locking so that only one Orca can run on a single configuration
+ file at one time.
+14) Include and require RRDtool 1.0.7.2 and Math::Interpolate 1.05.
+ Include Data::Dumper 2.101, Digest::MD5 2.09, and Storable 0.6.5
+ with Orca.
+
+ The following changes are what's new in orcallator.se 1.19 since
+ version 1.7 which was included with Orca 0.23. All of the changes
+ below are taken advantage of in the included orcallator.cfg and
+ start_orcallator files.
+
+15) Orcallator.se now has a web page describing the various measurements
+ it makes. See http://www.geocities.com/~bzking/docs/orcallator.html.
+16) If the environmental variable WEB_SERVER is defined, use its value
+ as the name of the process to count for the number of web
+ servers on the system. If WEB_SERVER is not defined, then count the
+ number of httpd's.
+17) If the COMPRESSOR environmental variable is defined, then when a new
+ log file is opened for a new day, the just closed log file is
+ compressed using the COMPRESSOR command in the following manner:
+ system(sprintf("%s %s &", COMPRESSOR, log_file))
+ COMPRESSOR should be set to something like "gzip -9", or "compress",
+ or "bzip2 -9". If the configure script finds both a compressor and
+ uncompressor tool, such as both gzip and gunzip, then start_orcallator
+ will inform orcallator.se to compress the log files at the end of a
+ day.
+18) New measurements. The first column lists the define that must
+ be passed to the SE interpreter to obtain the measurement. If you
+ use the WATCH_OS define, then all of these are now measured.
+ WATCH_MUTEX - ncpus - number of CPUs on the system
+ WATCH_CPU - #proc/s - 5 minute average process spawn rate if root
+ WATCH_CPU - #proc/p5s - maximum 5 second process spawn rate if root
+ WATCH_CPU - scanrate - page scan rate in pages per second
+ WATCH_DISK - disk_runp_c\d+t\d+d\d+ - run percent for each disk
+ WATCH_DISK - disk_rd/s - system wide read operations per second
+ WATCH_DISK - disk_wr/s - system wide write operations per second
+ WATCH_DISK - disk_rK/s - system wide kilobytes read per second
+ WATCH_DISK - disk_wK/s - system wide kilobytes written per second
+ WATCH_RAM - freememK - number of free kilobytes of memory on the system
+ WATCH_MOUNTS - mntC_* - capacity of disk in kilobytes
+ WATCH_MOUNTS - mntc_* - inode capacity of disk
+ WATCH_MOUNTS - mntU_* - used capacity of disk in kilobytes
+ WATCH_MOUNTS - mntu_* - used inode capacity of disk
+ WATCH_MOUNTS - mntA_* - available kilobytes for non-root users
+ WATCH_MOUNTS - mnta_* - available inodes for non-root users
+ WATCH_MOUNTS - mntP_* - percentage of kilobytes used for non-root users
+ WATCH_MOUNTS - mntp_* - percentage of inodes used for non-root users
+19) Add some smarts so that if the number of interfaces, physical
+ disks, or mounted partitions changes, then a new header is printed.
+ This will prevent column name and data mixups when the system
+ configuration changes.
+20) Prevent a division by zero in calculating the mean_disk_busy if the
+ number of disks on the system is 0.
+21) Fix a bug in the disk_mean calculation where it was being divided
+ by the wrong disk_count. Now it should be much larger and in scale
+ with disk_peak.
+22) Increase the number of characters for each network interface from four
+ to five.
+23) If WATCH_YAHOO is defined, then process the access log as a Yahoo!
+ style access log.
+24) Restructure the code to handle different web server access log formats
+ more easily.
+
+New in Orca version 0.23.
1) Fix two important bugs in orcallator_running and start_orcallator
which where using old PERCOLLATOR_DIR variables instead of the new
ORCALLATOR_DIR.
-New in version 0.22.
+New in Orca version 0.22.
1) Add a new command line option (-r) that tells Orca to only update
the RRD data files and not to generate any HTML or GIF files.
This is useful if you are loading in a large amount of data in several
@@ -20,10 +133,10 @@
4) A temporary fix: Skip and do not complain about input source data
file lines containing the word timestamp in it.
5) Removed some unused methods to make Orca smaller.
- 6) Added some more documentation to orcallater.cfg.
+ 6) Added some more documentation to orcallator.cfg.
7) Make Orca slightly faster.
-New in version 0.21.
+New in Orca version 0.21.
1) Every file containing the words percollator, percol and perc has
been renamed to contain the word orcallator. A new make target named
migrate will change all filenames in an installed Orca directory to
Modified: trunk/orca/README
==============================================================================
--- trunk/orca/README (original)
+++ trunk/orca/README Sat Jul 13 19:22:26 2002
@@ -1,70 +1,78 @@
-This package contains two main tools: Orca and orcallator.se.
-
-Orca
+ORCA
=====
Orca is a tool useful for plotting arbitrary data from text files onto
a directory on a Web server. It has the following features:
- * Configuration file based.
- * Reads white space separated data files.
- * Watches data files for updates and sleeps between reads.
- * Finds new files at specified times.
- * Remembers the last modification times for files so they do not have to
- be reread continuously.
- * Can plot the same type of data from different files into different
- or the same GIFs.
- * Different plots can be created based on the filename.
- * Parses the date from the text files.
- * Create arbitrary plots of data from different columns.
- * Ignore columns or use the same column in many plots.
- * Add or remove columns from plots without having to deleting RRDs.
- * Plot the results of arbitrary Perl expressions, including mathematical
- ones, using one or more columns.
- * Group multiple columns into a single plot using regular expressions on
- the column titles.
- * Creates an HTML tree of HTML files and GIF plots.
- * Creates an index of URL links listing all available targets.
- * Creates an index of URL links listing all different plot types.
- * No separate CGI set up required.
- * Can be run under cron or it can sleep itself waiting for file updates
- based on when the file was last updated.
+ * Creates an HTML tree of HTML and image (PNG or GIF) files.
+ * Creates an index of URL links listing all available targets.
+ * Creates an index of URL links listing all different plot types.
+ * No separate CGI set up required.
+ * Can be run under cron or it can sleep itself waiting for file updates
+ based on when the file was last updated.
+ * Configuration file based.
+ * Reads arbitrarily formatted text or binary data files.
+ * Watches data files for updates and sleeps between reads.
+ * Finds new files at specified times.
+ * Remembers the last modification times for files so they do not have to
+ be reread continuously.
+ * Allows arbitrary grouping of data from different files into the same
+ or different plots.
+ * Allows arbitrary math performed on data read from one file.
An example of the output generated by Orca is located at:
-http://www.geocities.com/~bzking/orca-example/
+ http://www.geocities.com/~bzking/orca-example/
Orca is written completely in Perl. To install, configure and use
-Orca read the INSTALL file. Some sample configuration files for
-Orca can be found in the sample_configs directory.
+Orca, read the INSTALL file. Some sample configuration files for
+Orca can be found in the lib directory.
-Orca is based on the RRD tool written by Tobias Oetiker. To really
-understand Orca you should understand RRD. Get RRD from
+For an overview of the design decisions that made Orca what it is today,
+read the article
-http://ee-staff.ethz.ch/~oetiker/webtools/rrdtool/
+ http://www.sunworld.com/sunworldonline/swol-07-1999/swol-07-realtime.html
-I recommend reading the documentation that comes with RRD. It will
+Orca is based on the RRDtool written by Tobias Oetiker. To really
+understand Orca and how it saves and manages your data, you should
+understand RRDtool, which serves as the backend binary data filestore.
+I recommend reading the documentation that comes with RRDtool. It will
explain how the data files Orca uses are created, maintained, and
-used to create the GIFs that Orca creates.
+used to create the images that Orca creates. Read about RRDtool at
-orcallator.se
-==============
+ http://ee-staff.ethz.ch/~oetiker/webtools/rrdtool/
+
+DATA COLLECTION TOOLS
+=====================
+
+Orca does not itself generate the data that it plots. This is left for
+other programs. Currently only one such data gathering and measuring
+program is included here with Orca.
+
+ orcallator.se
+ ==============
+
+ The other tool in this package is an updated version of orcallator.se
+ written by Adrian Cockcroft. Percollator.se is a tool written for Solaris
+ SPARC and Solaris x86 that collects a large amount of system and web
+ server statistics and prints them into a file for later processing
+ and plotting. For documentation on the original orcallator.se tool,
+ see the URL http://www.sunworld.com/swol-03-1996/swol-03-perf.html
-The other tool in this package is an updated version of orcallator.se
-written by Adrian Cockcroft. Percollator.se is a tool written for Solaris
-SPARC and Solaris x86 that collects a large amount of system and web
-server statistics and prints them into a file for later processing
-and plotting. For documentation on the original orcallator.se tool,
-see the URL http://www.sunworld.com/swol-03-1996/swol-03-perf.html
-
-This version of orcallator.se collects much more data than the original
-on Solaris systems. I have designed an Orca configuration file designed
-to read the output of this orcallator. Sample output from this set up
-is displayed at
+ This version of orcallator.se collects much more data than the original
+ on Solaris systems. I have written an Orca configuration file designed
+ to read the output of this orcallator. Sample output from this set up
+ is displayed at
-http://www.geocities.com/~bzking/orca-example/
+ http://www.geocities.com/~bzking/orca-example/
-AVAILABLE AT
+ Documentation on the data that orcallator.se collects can be viewed at
+
+ http://www.geocities.com/~bzking/docs/orcallator.html
+
+DOWNLOAD AT
============
These tools are available for download from
@@ -74,10 +82,48 @@
MAILING LISTS
=============
-Discussions regarding Orca take place on the mrtg-developers mailing
-list located at mrtg-developers at list.ee.ethz.ch. To place yourself
-on the mailing list, send a message with the word subscribe to it
-to mrtg-developers-request at list.ee.ethz.ch.
+Four mailing lists exist for Orca. To subscribe to any of them, please
+visit the URLs listed below. You may choose a digest form of a mailing
+list when you subscribe or at any time thereafter. To send email to any
+of these lists, you must first subscribe to it.
+
+orca-announce at onelist.com
+ Subscribe http://www.onelist.com/subscribe/orca-announce
+ Archive http://www.onelist.com/archive/orca-announce
+
+ This is a LOW volume moderated mailing list for announcing stable
+ releases of Orca.
+
+orca-help at onelist.com
+ Subscribe http://www.onelist.com/subscribe/orca-help
+ Archive http://www.onelist.com/archive/orca-help
+
+ This mailing list is a first stop mailing list for getting help in
+ setting up and getting Orca running. Problems relating to downloading,
+ configuring, compiling the necessary Perl modules, and installing Orca
+ belong here. People interested in anything more than this, such as
+ developing data gathering modules or active Perl development, should be
+ on one or both of the following mailing lists. Once you get Orca
+ running to your satisfaction, you may want to remove yourself from this
+ list.
+
+orca-discuss at onelist.com
+ Subscribe http://www.onelist.com/subscribe/orca-discuss
+ Archive http://www.onelist.com/archive/orca-discuss
+
+ This mailing list is for active users of Orca who are doing new
+ interesting things with Orca and want to discuss Orca but are not
+ interested in actively developing Orca source code. These people are
+ also not interested in helping people get Orca running on their systems.
+
+orca-developers at onelist.com
+ Subscribe http://www.onelist.com/subscribe/orca-developers
+ Archive http://www.onelist.com/archive/orca-developers
+
+ This mailing list is for people who are interested in actively developing,
+ fixing and improving Orca's source code and related data gathering
+ modules, and porting Orca to new platforms.
INSTALLATION
============
@@ -87,5 +133,18 @@
AUTHOR
======
-These two tools were written by Blair Zajac <bzajac at geostaff.com>. I
-welcome any patches for bugs or improvements, comments and suggestions.
+These two tools were written by Blair Zajac. I welcome any patches for
+bugs or improvements, comments and suggestions. Please send any Orca
+correspondence to orca-users at onelist.com or orca-developers at onelist.com,
+which I read and participate on.
+
+If you wish to contact me directly, my email address is
+bzajac at geostaff.com.
+
+NAMING OF ORCA
+==============
+
+I originally named Orca FMRTG, but after asking my wife
+(http://www.rothschildimage.com) for some suggestions, she came up with
+Orca. It turns out that there are only one or two small programs on
+the Internet named Orca, so we both were happy to hear this.
Modified: trunk/orca/packages/Storable-0.6.5/t/dump.pl
==============================================================================
--- trunk/orca/packages/Storable-0.6.5/t/dump.pl (original)
+++ trunk/orca/packages/Storable-0.6.5/t/dump.pl Sat Jul 13 19:22:26 2002
@@ -1,4 +1,4 @@
-;# $Id: dump.pl,v 0.6 1998/06/04 16:08:27 ram Exp $
+;# $Id: dump.pl,v 0.6 1998/06/04 16:08:27 ram Exp ram $
;#
;# Copyright (c) 1995-1998, Raphael Manfredi
;#
Modified: trunk/orca/packages/Storable-0.6.5/t/freeze.t
==============================================================================
--- trunk/orca/packages/Storable-0.6.5/t/freeze.t (original)
+++ trunk/orca/packages/Storable-0.6.5/t/freeze.t Sat Jul 13 19:22:26 2002
@@ -1,6 +1,6 @@
#!./perl
-# $Id: freeze.t,v 0.6.1.1 1998/06/12 09:47:08 ram Exp $
+# $Id: freeze.t,v 0.6.1.1 1998/06/12 09:47:08 ram Exp ram $
#
# Copyright (c) 1995-1998, Raphael Manfredi
#
Modified: trunk/orca/packages/Storable-0.6.5/t/forgive.t
==============================================================================
--- trunk/orca/packages/Storable-0.6.5/t/forgive.t (original)
+++ trunk/orca/packages/Storable-0.6.5/t/forgive.t Sat Jul 13 19:22:26 2002
@@ -1,6 +1,6 @@
#!./perl
-# $Id: forgive.t,v 0.6 1998/06/04 16:08:38 ram Exp $
+# $Id: forgive.t,v 0.6 1998/06/04 16:08:38 ram Exp ram $
#
# Copyright (c) 1995-1998, Raphael Manfredi
#
Modified: trunk/orca/packages/Storable-0.6.5/t/dclone.t
==============================================================================
--- trunk/orca/packages/Storable-0.6.5/t/dclone.t (original)
+++ trunk/orca/packages/Storable-0.6.5/t/dclone.t Sat Jul 13 19:22:27 2002
@@ -1,6 +1,6 @@
#!./perl
-# $Id: dclone.t,v 0.6 1998/06/04 16:08:25 ram Exp $
+# $Id: dclone.t,v 0.6 1998/06/04 16:08:25 ram Exp ram $
#
# Copyright (c) 1995-1998, Raphael Manfredi
#
Modified: trunk/orca/packages/Storable-0.6.5/t/retrieve.t
==============================================================================
--- trunk/orca/packages/Storable-0.6.5/t/retrieve.t (original)
+++ trunk/orca/packages/Storable-0.6.5/t/retrieve.t Sat Jul 13 19:22:27 2002
@@ -1,6 +1,6 @@
#!./perl
-# $Id: retrieve.t,v 0.6 1998/06/04 16:08:33 ram Exp $
+# $Id: retrieve.t,v 0.6 1998/06/04 16:08:33 ram Exp ram $
#
# Copyright (c) 1995-1998, Raphael Manfredi
#
Modified: trunk/orca/packages/Storable-0.6.5/t/tied.t
==============================================================================
--- trunk/orca/packages/Storable-0.6.5/t/tied.t (original)
+++ trunk/orca/packages/Storable-0.6.5/t/tied.t Sat Jul 13 19:22:27 2002
@@ -1,6 +1,6 @@
#!./perl
-# $Id: tied.t,v 0.6 1998/06/04 16:08:40 ram Exp $
+# $Id: tied.t,v 0.6 1998/06/04 16:08:40 ram Exp ram $
#
# Copyright (c) 1995-1998, Raphael Manfredi
#
Modified: trunk/orca/packages/Storable-0.6.5/t/canonical.t
==============================================================================
--- trunk/orca/packages/Storable-0.6.5/t/canonical.t (original)
+++ trunk/orca/packages/Storable-0.6.5/t/canonical.t Sat Jul 13 19:22:27 2002
@@ -1,6 +1,6 @@
#!./perl
-# $Id: canonical.t,v 0.6 1998/06/04 16:08:24 ram Exp $
+# $Id: canonical.t,v 0.6 1998/06/04 16:08:24 ram Exp ram $
#
# Copyright (c) 1995-1998, Raphael Manfredi
#
Modified: trunk/orca/packages/Storable-0.6.5/t/store.t
==============================================================================
--- trunk/orca/packages/Storable-0.6.5/t/store.t (original)
+++ trunk/orca/packages/Storable-0.6.5/t/store.t Sat Jul 13 19:22:27 2002
@@ -1,6 +1,6 @@
#!./perl
-# $Id: store.t,v 0.6 1998/06/04 16:08:35 ram Exp $
+# $Id: store.t,v 0.6 1998/06/04 16:08:35 ram Exp ram $
#
# Copyright (c) 1995-1998, Raphael Manfredi
#
Modified: trunk/orca/packages/Storable-0.6.5/Storable.xs
==============================================================================
--- trunk/orca/packages/Storable-0.6.5/Storable.xs (original)
+++ trunk/orca/packages/Storable-0.6.5/Storable.xs Sat Jul 13 19:22:28 2002
@@ -3,7 +3,7 @@
*/
/*
- * $Id: Storable.xs,v 0.6.1.3 1998/07/03 11:36:09 ram Exp $
+ * $Id: Storable.xs,v 0.6.1.5 1999/09/14 20:12:29 ram Exp ram $
*
* Copyright (c) 1995-1998, Raphael Manfredi
*
@@ -11,6 +11,13 @@
* as specified in the README file that comes with the distribution.
*
* $Log: Storable.xs,v $
+ * Revision 0.6.1.5 1999/09/14 20:12:29 ram
+ * patch5: integrated "thread-safe" patch from Murray Nesbitt
+ * patch5: try to avoid compilation warning on 64-bit CPUs
+ *
+ * Revision 0.6.1.4 1999/07/12 12:37:01 ram
+ * patch4: uses new internal PL_* naming convention.
+ *
* Revision 0.6.1.3 1998/07/03 11:36:09 ram
* patch3: fixed compatibility (wrt 0.5 at 9) for retrieval of blessed refs
* patch3: increased store() throughput significantly
@@ -56,6 +63,11 @@
#ifndef newRV_noinc
#define newRV_noinc(sv) ((Sv = newRV(sv)), --SvREFCNT(SvRV(Sv)), Sv)
#endif
+#ifndef ERRSV /* Detects older perls (<= 5.004) */
+#define PL_sv_yes sv_yes
+#define PL_sv_no sv_no
+#define PL_sv_undef sv_undef
+#endif
#ifndef HvSHAREKEYS_off
#define HvSHAREKEYS_off(hv) /* Ignore */
#endif
@@ -96,9 +108,9 @@
#define SX_TIED_ARRAY C(11) /* Tied array forthcoming */
#define SX_TIED_HASH C(12) /* Tied hash forthcoming */
#define SX_TIED_SCALAR C(13) /* Tied scalar forthcoming */
-#define SX_SV_UNDEF C(14) /* Perl's immortal sv_undef */
-#define SX_SV_YES C(15) /* Perl's immortal sv_yes */
-#define SX_SV_NO C(16) /* Perl's immortal sv_no */
+#define SX_SV_UNDEF C(14) /* Perl's immortal PL_sv_undef */
+#define SX_SV_YES C(15) /* Perl's immortal PL_sv_yes */
+#define SX_SV_NO C(16) /* Perl's immortal PL_sv_no */
#define SX_ERROR C(17) /* Error */
/*
@@ -155,16 +167,80 @@
typedef unsigned long stag_t; /* Used by pre-0.6 binary format */
/*
- * XXX multi-threading needs context for the following variables...
+ * The following "thread-safe" related defines were contributed by
+ * Murray Nesbitt <murray at activestate.com> and integrated by RAM, who
+ * only renamed things a little bit to ensure consistency with surrounding
+ * code.
+ *
+ * The patch itself is fairly inefficient since it performs a lookup in
+ * some hash table at the start of every routine. It has to do that in order
+ * to determine the proper context.
+ *
+ * The right solution, naturally, is to change all the signatures to propagate
+ * the context down the call chain and fetch the per-thread context only
+ * once at the entry point before recursion begins. That's planned for some
+ * day, when Perl's threading model has stabilized.
+ *
+ * -- RAM, 14/09/1999
+ */
+
+#define MY_VERSION "Storable(" XS_VERSION ")"
+
+typedef struct {
+ HV *hseen; /* which objects have been seen, store time */
+ AV *aseen; /* which objects have been seen, retrieve time */
+ I32 tagnum; /* incremented at store time for each seen object */
+ int netorder; /* true if network order used */
+ int forgive_me; /* whether to be forgiving... */
+ int canonical; /* whether to store hashes sorted by key */
+ struct extendable keybuf; /* for hash key retrieval */
+ struct extendable membuf; /* for memory store/retrieve operations */
+} storable_cxt_t;
+
+#if defined(MULTIPLICITY) || defined(PERL_OBJECT) || defined(PERL_CAPI)
+
+#if (PATCHLEVEL == 4) && (SUBVERSION < 68)
+#define dPERINTERP_SV \
+ SV *perinterp_sv = perl_get_sv(MY_VERSION, FALSE) \
+#else /* >= perl5.004_68 */
+#define dPERINTERP_SV \
+ SV *perinterp_sv = *hv_fetch(PL_modglobal, \
+ MY_VERSION, sizeof(MY_VERSION)-1, TRUE)
+#endif /* < perl5.004_68 */
+
+#define dPERINTERP_PTR(T,name) \
+ T name = (T)(perinterp_sv && SvIOK(perinterp_sv) \
+ ? SvIVX(perinterp_sv) : NULL)
+#define dPERINTERP \
+ dPERINTERP_SV; \
+ dPERINTERP_PTR(storable_cxt_t *, PERINTERP)
+
+#define INIT_PERINTERP \
+ dPERINTERP; \
+ Newz(0,PERINTERP,1, storable_cxt_t); \
+ sv_setiv(perinterp_sv, (IV)PERINTERP)
+
+#else /* !MULTIPLICITY && !PERL_OBJECT && !PERL_CAPI */
+
+static storable_cxt_t Context;
+#define dPERINTERP typedef int _interp_DBI_dummy
+#define PERINTERP (&Context)
+#define INIT_PERINTERP
+
+#endif /* MULTIPLICITY || PERL_OBJECT || PERL_CAPI */
+
+#define hseen (PERINTERP->hseen)
+#define aseen (PERINTERP->aseen)
+#define tagnum (PERINTERP->tagnum)
+#define netorder (PERINTERP->netorder)
+#define forgive_me (PERINTERP->forgive_me)
+#define canonical (PERINTERP->canonical)
+#define keybuf (PERINTERP->keybuf)
+#define membuf (PERINTERP->membuf)
+
+/*
+ * End of "thread-safe" related definitions.
*/
-static HV *hseen; /* which objects have been seen, store time */
-static AV *aseen; /* which objects have been seen, retrieve time */
-static I32 tagnum; /* incremented at store time for each seen object */
-static int netorder = 0; /* true if network order used */
-static int forgive_me = -1; /* whether to be forgiving... */
-static int canonical; /* whether to store hashes sorted by key */
-struct extendable keybuf; /* for hash key retrieval */
-struct extendable membuf; /* for memory store/retrieve operations */
/*
* key buffer handling
@@ -370,7 +446,7 @@
} while (0)
#define STORE_SCALAR(pv, len) do { \
- if (len < LG_SCALAR) { \
+ if (len <= LG_SCALAR) { \
unsigned char clen = (unsigned char) len; \
PUTMARK(SX_SCALAR); \
PUTMARK(clen); \
@@ -468,6 +544,7 @@
PerlIO *f;
SV *sv;
{
+ dPERINTERP;
TRACEME(("store_ref (0x%lx)", (unsigned long) sv));
PUTMARK(SX_REF);
@@ -490,6 +567,7 @@
PerlIO *f;
SV *sv;
{
+ dPERINTERP;
IV iv;
char *pv;
STRLEN len;
@@ -504,7 +582,7 @@
*/
if (!(flags & SVf_OK)) { /* !SvOK(sv) */
- if (sv == &sv_undef) {
+ if (sv == &PL_sv_undef) {
TRACEME(("immortal undef"));
PUTMARK(SX_SV_UNDEF);
} else {
@@ -539,17 +617,17 @@
* value is false.
*
* The test for a read-only scalar with both POK and NOK set is meant
- * to quickly detect &sv_yes and &sv_no without having to pay the address
- * comparison for each scalar we store.
+ * to quickly detect &PL_sv_yes and &PL_sv_no without having to pay the
+ * address comparison for each scalar we store.
*/
#define SV_MAYBE_IMMORTAL (SVf_READONLY|SVf_POK|SVf_NOK)
if ((flags & SV_MAYBE_IMMORTAL) == SV_MAYBE_IMMORTAL) {
- if (sv == &sv_yes) {
+ if (sv == &PL_sv_yes) {
TRACEME(("immortal yes"));
PUTMARK(SX_SV_YES);
- } else if (sv == &sv_no) {
+ } else if (sv == &PL_sv_no) {
TRACEME(("immortal no"));
PUTMARK(SX_SV_NO);
} else {
@@ -561,8 +639,8 @@
/*
* Will come here from below with pv and len set if double & netorder,
- * or from above if it was readonly, POK and NOK but neither &sv_yes
- * nor &sv_no.
+ * or from above if it was readonly, POK and NOK but neither &PL_sv_yes
+ * nor &PL_sv_no.
*/
string:
@@ -647,6 +725,7 @@
PerlIO *f;
AV *av;
{
+ dPERINTERP;
SV **sav;
I32 len = av_len(av) + 1;
I32 i;
@@ -712,6 +791,7 @@
PerlIO *f;
HV *hv;
{
+ dPERINTERP;
I32 len = HvKEYS(hv);
I32 i;
int ret = 0;
@@ -878,6 +958,7 @@
PerlIO *f;
SV *sv;
{
+ dPERINTERP;
MAGIC *mg;
int ret = 0;
int svt = SvTYPE(sv);
@@ -946,6 +1027,7 @@
PerlIO *f;
SV *sv;
{
+ dPERINTERP;
STRLEN len;
static char buf[80];
@@ -1061,6 +1143,7 @@
PerlIO *f;
SV *sv;
{
+ dPERINTERP;
SV **svh;
int ret;
int type;
@@ -1072,11 +1155,21 @@
* If object has already been stored, do not duplicate data.
* Simply emit the SX_OBJECT marker followed by its tag data.
* The tag is always written in network order.
+ *
+ * NOTA BENE, for 64-bit machines: the "*svh" below does not yield a
+ * real pointer, rather a tag number (watch the insertion code below).
+ * That means it is probably safe to assume it is well under the 32-bit limit,
+ * and that makes the truncation safe.
+ * -- RAM, 14/09/1999
*/
svh = hv_fetch(hseen, (char *) &sv, sizeof(sv), FALSE);
if (svh) {
+#if PTRSIZE <= 4
I32 tagval = htonl((I32) (*svh));
+#else
+ I32 tagval = htonl((I32) ((unsigned long) (*svh) & 0xffffffff));
+#endif
TRACEME(("object 0x%lx seen as #%d.", (unsigned long) sv, tagval));
PUTMARK(SX_OBJECT);
WRITE(&tagval, sizeof(I32));
@@ -1091,9 +1184,13 @@
* cast the tagnum to a SV pointer and store that in the hash. This
* means that we must clean up the hash manually afterwards, but gives
* us a 15% throughput increase.
+ *
+ * The (IV) cast below is for 64-bit machines, to avoid warnings from
+ * the compiler. Please, let me know if it does not work.
+ * -- RAM, 14/09/1999
*/
- if (!hv_store(hseen, (char *) &sv, sizeof(sv), (SV*) (tagnum++), 0))
+ if (!hv_store(hseen, (char *) &sv, sizeof(sv), (SV*) (IV) (tagnum++), 0))
return -1;
TRACEME(("recorded 0x%lx as object #%d", (unsigned long) sv, tagnum));
@@ -1154,6 +1251,7 @@
PerlIO *f;
int use_network_order;
{
+ dPERINTERP;
char buf[256]; /* Enough room for 256 hexa digits */
unsigned char c;
@@ -1200,6 +1298,7 @@
SV *sv;
int use_network_order;
{
+ dPERINTERP;
int status;
netorder = use_network_order; /* Global, not suited for multi-thread */
@@ -1271,7 +1370,7 @@
hv_iterinit(hseen);
while (he = hv_iternext(hseen))
- HeVAL(he) = &sv_undef;
+ HeVAL(he) = &PL_sv_undef;
}
hv_undef(hseen); /* Free seen object table */
sv_free((SV *) hseen); /* Free HV */
@@ -1288,6 +1387,7 @@
*/
static SV *mbuf2sv()
{
+ dPERINTERP;
return newSVpv(mbase, MBUF_SIZE());
}
@@ -1300,10 +1400,11 @@
SV *mstore(sv)
SV *sv;
{
+ dPERINTERP;
TRACEME(("mstore"));
MBUF_INIT(0);
if (!do_store(0, sv, FALSE)) /* Not in network order */
- return &sv_undef;
+ return &PL_sv_undef;
return mbuf2sv();
}
@@ -1317,10 +1418,11 @@
SV *net_mstore(sv)
SV *sv;
{
+ dPERINTERP;
TRACEME(("net_mstore"));
MBUF_INIT(0);
if (!do_store(0, sv, TRUE)) /* Use network order */
- return &sv_undef;
+ return &PL_sv_undef;
return mbuf2sv();
}
@@ -1363,6 +1465,7 @@
static SV *retrieve_ref(f)
PerlIO *f;
{
+ dPERINTERP;
SV *rv;
SV *sv;
@@ -1418,6 +1521,7 @@
static SV *retrieve_tied_array(f)
PerlIO *f;
{
+ dPERINTERP;
SV *tv;
SV *sv;
@@ -1448,6 +1552,7 @@
static SV *retrieve_tied_hash(f)
PerlIO *f;
{
+ dPERINTERP;
SV *tv;
SV *sv;
@@ -1477,6 +1582,7 @@
static SV *retrieve_tied_scalar(f)
PerlIO *f;
{
+ dPERINTERP;
SV *tv;
SV *sv;
@@ -1509,6 +1615,7 @@
static SV *retrieve_lscalar(f)
PerlIO *f;
{
+ dPERINTERP;
STRLEN len;
SV *sv;
@@ -1555,6 +1662,7 @@
static SV *retrieve_scalar(f)
PerlIO *f;
{
+ dPERINTERP;
int len;
SV *sv;
@@ -1610,6 +1718,7 @@
static SV *retrieve_integer(f)
PerlIO *f;
{
+ dPERINTERP;
SV *sv;
IV iv;
@@ -1634,6 +1743,7 @@
static SV *retrieve_netint(f)
PerlIO *f;
{
+ dPERINTERP;
SV *sv;
int iv;
@@ -1663,6 +1773,7 @@
static SV *retrieve_double(f)
PerlIO *f;
{
+ dPERINTERP;
SV *sv;
double nv;
@@ -1687,6 +1798,7 @@
static SV *retrieve_byte(f)
PerlIO *f;
{
+ dPERINTERP;
SV *sv;
int siv;
@@ -1710,6 +1822,7 @@
*/
static SV *retrieve_undef()
{
+ dPERINTERP;
SV* sv;
TRACEME(("retrieve_undef"));
@@ -1727,7 +1840,8 @@
*/
static SV *retrieve_sv_undef()
{
- SV *sv = &sv_undef;
+ dPERINTERP;
+ SV *sv = &PL_sv_undef;
TRACEME(("retrieve_sv_undef"));
@@ -1742,7 +1856,8 @@
*/
static SV *retrieve_sv_yes()
{
- SV *sv = &sv_yes;
+ dPERINTERP;
+ SV *sv = &PL_sv_yes;
TRACEME(("retrieve_sv_yes"));
@@ -1757,7 +1872,8 @@
*/
static SV *retrieve_sv_no()
{
- SV *sv = &sv_no;
+ dPERINTERP;
+ SV *sv = &PL_sv_no;
TRACEME(("retrieve_sv_no"));
@@ -1789,6 +1905,7 @@
static SV *retrieve_array(f)
PerlIO *f;
{
+ dPERINTERP;
I32 len;
I32 i;
AV *av;
@@ -1841,6 +1958,7 @@
static SV *retrieve_hash(f)
PerlIO *f;
{
+ dPERINTERP;
I32 len;
I32 size;
I32 i;
@@ -1915,6 +2033,7 @@
static SV *old_retrieve_array(f)
PerlIO *f;
{
+ dPERINTERP;
I32 len;
I32 i;
AV *av;
@@ -1976,6 +2095,7 @@
static SV *old_retrieve_hash(f)
PerlIO *f;
{
+ dPERINTERP;
I32 len;
I32 size;
I32 i;
@@ -2010,12 +2130,12 @@
if (c == SX_VL_UNDEF) {
TRACEME(("(#%d) undef value", i));
/*
- * Due to a bug in hv_store(), it's not possible to pass &sv_undef
- * to hv_store() as a value, otherwise the associated key will
- * not be creatable any more. -- RAM, 14/01/97
+ * Due to a bug in hv_store(), it's not possible to pass
+ * &PL_sv_undef to hv_store() as a value, otherwise the
+ * associated key will not be creatable any more. -- RAM, 14/01/97
*/
if (!sv_h_undef)
- sv_h_undef = newSVsv(&sv_undef);
+ sv_h_undef = newSVsv(&PL_sv_undef);
sv = SvREFCNT_inc(sv_h_undef);
} else if (c == SX_VALUE) {
TRACEME(("(#%d) value", i));
@@ -2101,7 +2221,7 @@
retrieve_other, /* SX_ERROR */
};
-static SV *(**sv_retrieve_vtbl)(); /* One of the above -- XXX for threads*/
+static SV *(**sv_retrieve_vtbl)(); /* One of the above -- XXX for threads */
#define RETRIEVE(x) (*sv_retrieve_vtbl[(x) >= SX_ERROR ? SX_ERROR : (x)])
@@ -2111,7 +2231,7 @@
* Make sure the stored data we're trying to retrieve has been produced
* on an ILP compatible system with the same byteorder. It croaks out in
* case an error is detected. [ILP = integer-long-pointer sizes]
- * Returns null if error is detected, &sv_undef otherwise.
+ * Returns null if error is detected, &PL_sv_undef otherwise.
*
* Note that there's no byte ordering info emitted when network order was
* used at store time.
@@ -2119,6 +2239,7 @@
static SV *magic_check(f)
PerlIO *f;
{
+ dPERINTERP;
char buf[256];
char byteorder[256];
int c;
@@ -2165,7 +2286,7 @@
TRACEME(("binary image version is %d", version));
if (netorder = (use_network_order & 0x1))
- return &sv_undef; /* No byte ordering info */
+ return &PL_sv_undef; /* No byte ordering info */
sprintf(byteorder, "%lx", (unsigned long) BYTEORDER);
GETMARK(c);
@@ -2187,7 +2308,7 @@
if ((int) c != sizeof(char *))
croak("Pointer integer size is not compatible");
- return &sv_undef; /* OK */
+ return &PL_sv_undef; /* OK */
}
/*
@@ -2200,6 +2321,7 @@
static SV *retrieve(f)
PerlIO *f;
{
+ dPERINTERP;
int type;
SV **svh;
SV *sv;
@@ -2348,6 +2470,7 @@
static SV *do_retrieve(f)
PerlIO *f;
{
+ dPERINTERP;
SV *sv;
TRACEME(("do_retrieve"));
@@ -2360,6 +2483,8 @@
if (!magic_check(f))
croak("Magic number checking on perl storable failed");
+ TRACEME(("data stored in %s format", netorder ? "net order" : "native"));
+
/*
* If retrieving an old binary version, the sv_retrieve_vtbl variable is
* set to sv_old_retrieve. We'll need a hash table to keep track of
@@ -2380,7 +2505,7 @@
if (!sv) {
TRACEME(("retrieve ERROR"));
- return &sv_undef; /* Something went wrong, return undef */
+ return &PL_sv_undef; /* Something went wrong, return undef */
}
TRACEME(("retrieve got %s(0x%lx)",
@@ -2424,6 +2549,7 @@
SV *mretrieve(sv)
SV *sv;
{
+ dPERINTERP;
struct extendable mcommon; /* Temporary save area for global */
SV *rsv; /* Retrieved SV pointer */
@@ -2449,13 +2575,14 @@
SV *dclone(sv)
SV *sv;
{
+ dPERINTERP;
int size;
TRACEME(("dclone"));
MBUF_INIT(0);
if (!do_store(0, sv, FALSE)) /* Not in network order! */
- return &sv_undef; /* Error during store */
+ return &PL_sv_undef; /* Error during store */
size = MBUF_SIZE();
TRACEME(("dclone stored %d bytes", size));
@@ -2465,6 +2592,17 @@
}
/*
+ * init_perinterp
+ *
+ * Called once per "thread" (interpreter) to initialize some global context.
+ */
+static void init_perinterp() {
+ INIT_PERINTERP;
+ netorder = 0; /* true if network order used */
+ forgive_me = -1; /* whether to be forgiving... */
+}
+
+/*
* The Perl IO GV object distinguishes between input and output for sockets
* but not for plain files. To allow Storable to transparently work on
* plain files and sockets transparently, we have to ask xsubpp to fetch the
@@ -2483,6 +2621,9 @@
PROTOTYPES: ENABLE
+BOOT:
+ init_perinterp();
+
int
pstore(f,obj)
OutputStream f
Modified: trunk/orca/packages/Storable-0.6.5/Storable.pm
==============================================================================
--- trunk/orca/packages/Storable-0.6.5/Storable.pm (original)
+++ trunk/orca/packages/Storable-0.6.5/Storable.pm Sat Jul 13 19:22:28 2002
@@ -1,4 +1,4 @@
-;# $Id: Storable.pm,v 0.6.1.3 1998/07/03 11:32:52 ram Exp $
+;# $Id: Storable.pm,v 0.6.1.5 1999/09/14 20:11:22 ram Exp ram $
;#
;# Copyright (c) 1995-1998, Raphael Manfredi
;#
@@ -6,6 +6,12 @@
;# as specified in the README file that comes with the distribution.
;#
;# $Log: Storable.pm,v $
+;# Revision 0.6.1.5 1999/09/14 20:11:22 ram
+;# patch5: updated version number
+;#
+;# Revision 0.6.1.4 1999/07/12 12:36:04 ram
+;# patch4: changed my e-mail to pobox, updated version number.
+;#
;# Revision 0.6.1.3 1998/07/03 11:32:52 ram
;# patch3: recent optimizations increased store() throughput
;# patch3: increased revision number
@@ -36,7 +42,7 @@
use Carp;
use vars qw($forgive_me $VERSION);
-$VERSION = '0.603';
+$VERSION = '0.605';
*AUTOLOAD = \&AutoLoader::AUTOLOAD; # Grrr...
bootstrap Storable;
@@ -424,6 +430,6 @@
=head1 AUTHOR
-Raphael Manfredi F<E<lt>Raphael_Manfredi at grenoble.hp.comE<gt>>
+Raphael Manfredi F<E<lt>Raphael_Manfredi at pobox.comE<gt>>
=cut
Modified: trunk/orca/packages/Storable-0.6.5/ChangeLog
==============================================================================
--- trunk/orca/packages/Storable-0.6.5/ChangeLog (original)
+++ trunk/orca/packages/Storable-0.6.5/ChangeLog Sat Jul 13 19:22:29 2002
@@ -1,3 +1,26 @@
+Tue Sep 14 22:13:28 MEST 1999 Raphael Manfredi <Raphael_Manfredi at pobox.com>
+
+. Description:
+
+ Integrated "thread-safe" patch from Murray Nesbitt.
+ Note that this may not be very efficient for threaded code,
+ see comment in the code.
+
+ Try to avoid compilation warning on 64-bit CPUs. Can't test it,
+ since I don't have access to such machines.
+
+Mon Jul 12 14:37:19 METDST 1999 Raphael Manfredi <Raphael_Manfredi at pobox.com>
+
+. Description:
+
+ changed my e-mail to pobox.
+
+ mentioned it is not thread-safe.
+
+ updated version number.
+
+ uses new internal PL_* naming convention.
+
Fri Jul 3 13:38:16 METDST 1998 Raphael Manfredi <Raphael_Manfredi at grenoble.hp.com>
. Description:
Modified: trunk/orca/packages/Storable-0.6.5/Makefile.PL
==============================================================================
--- trunk/orca/packages/Storable-0.6.5/Makefile.PL (original)
+++ trunk/orca/packages/Storable-0.6.5/Makefile.PL Sat Jul 13 19:22:29 2002
@@ -1,4 +1,4 @@
-# $Id: Makefile.PL,v 0.6 1998/06/04 16:08:18 ram Exp $
+# $Id: Makefile.PL,v 0.6 1998/06/04 16:08:18 ram Exp ram $
#
# Copyright (c) 1995-1998, Raphael Manfredi
#
Modified: trunk/orca/packages/Storable-0.6.5/README
==============================================================================
--- trunk/orca/packages/Storable-0.6.5/README (original)
+++ trunk/orca/packages/Storable-0.6.5/README Sat Jul 13 19:22:29 2002
@@ -15,6 +15,7 @@
*** This is beta software -- use at your own risks ***
The Storable extension brings persistency to your data.
+This extension is NOT thread-safe and should not be used by threaded perls.
You may recursively store to disk any data structure, no matter
how complex and circular it is, provided it contains only SCALAR,
@@ -136,7 +137,7 @@
There is an embeded POD manual page in Storable.pm.
-Raphael Manfredi <Raphael_Manfredi at grenoble.hp.com>
+Raphael Manfredi <Raphael_Manfredi at pobox.com>
Thanks to:
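
For reference, the README excerpt above summarizes what Storable does: it
serializes arbitrarily nested (and circular) Perl data structures to disk
or to memory and reads them back. The sketch below is illustrative only --
it is not taken from this commit, and the file name and sample structure
are invented for the example:

    # Minimal Storable usage sketch (illustrative; names are invented).
    use Storable qw(store retrieve dclone);

    my $data = { name => 'orca', hosts => [ 'web1', 'web2' ] };
    $data->{self} = $data;                   # circular refs are handled

    store($data, '/tmp/example.storable');   # serialize to disk
    my $copy  = retrieve('/tmp/example.storable');
    my $clone = dclone($data);               # deep copy in memory

    print $copy->{hosts}[1], "\n";           # prints "web2"

The patch above touches only the module's internals (per-interpreter
context, PL_* symbol names, 64-bit casts); calls like these behave the
same at the Perl level.
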
Modified: trunk/orca/packages/Storable-0.6.5/patchlevel.h
==============================================================================
--- trunk/orca/packages/Storable-0.6.5/patchlevel.h (original)
+++ trunk/orca/packages/Storable-0.6.5/patchlevel.h Sat Jul 13 19:22:29 2002
@@ -1 +1 @@
-#define PATCHLEVEL 3
+#define PATCHLEVEL 5
Modified: trunk/orca/packages/Makefile.in
==============================================================================
--- trunk/orca/packages/Makefile.in (original)
+++ trunk/orca/packages/Makefile.in Sat Jul 13 19:22:29 2002
@@ -1,114 +1,164 @@
@SET_MAKE@
-PERL = @PERL@
-CFLAGS = @CFLAGS@
-digest_md5_dir = @DIGEST_MD5_DIR@
-math_interpolate_dir = @MATH_INTERPOLATE_DIR@
-rrdtool_dir = @RRDTOOL_DIR@
-storable_dir = @STORABLE_DIR@
-
-MAKE_DIGEST_MD5 = @MAKE_DIGEST_MD5@
-MAKE_MATH_INTERPOLATE = @MAKE_MATH_INTERPOLATE@
-MAKE_RRDTOOL = @MAKE_RRDTOOL@
-MAKE_STORABLE = @MAKE_STORABLE@
-MAKE_TARGETS = $(MAKE_DIGEST_MD5) $(MAKE_MATH_INTERPOLATE) $(MAKE_RRDTOOL) $(MAKE_STORABLE)
-
-TEST_DIGEST_MD5 = @TEST_DIGEST_MD5@
-TEST_MATH_INTERPOLATE = @TEST_MATH_INTERPOLATE@
-TEST_RRDTOOL = @TEST_RRDTOOL@
-TEST_STORABLE = @TEST_STORABLE@
-TEST_TARGETS = $(TEST_DIGEST_MD5) $(TEST_MATH_INTERPOLATE) $(TEST_RRDTOOL) $(TEST_STORABLE)
-
-INSTALL_DIGEST_MD5 = @INSTALL_DIGEST_MD5@
-INSTALL_MATH_INTERPOLATE= @INSTALL_MATH_INTERPOLATE@
-INSTALL_RRDTOOL = @INSTALL_RRDTOOL@
-INSTALL_STORABLE = @INSTALL_STORABLE@
-INSTALL_TARGETS = $(INSTALL_DIGEST_MD5) $(INSTALL_MATH_INTERPOLATE) $(INSTALL_RRDTOOL) $(INSTALL_STORABLE)
-
-CLEAN_DIGEST_MD5 = @CLEAN_DIGEST_MD5@
-CLEAN_MATH_INTERPOLATE = @CLEAN_MATH_INTERPOLATE@
-CLEAN_RRDTOOL = @CLEAN_RRDTOOL@
-CLEAN_STORABLE = @CLEAN_STORABLE@
-CLEAN_TARGETS = $(CLEAN_DIGEST_MD5) $(CLEAN_MATH_INTERPOLATE) $(CLEAN_RRDTOOL) $(CLEAN_STORABLE)
-
-DISTCLEAN_DIGEST_MD5 = @DISTCLEAN_DIGEST_MD5@
-DISTCLEAN_MATH_INTERPOLATE = @DISTCLEAN_MATH_INTERPOLATE@
-DISTCLEAN_RRDTOOL = @DISTCLEAN_RRDTOOL@
-DISTCLEAN_STORABLE = @DISTCLEAN_STORABLE@
-DISTCLEAN_TARGETS = $(DISTCLEAN_DIGEST_MD5) $(DISTCLEAN_MATH_INTERPOLATE) $(DISTCLEAN_RRDTOOL) $(DISTCLEAN_STORABLE)
+PERL = @PERL@
+CFLAGS = @CFLAGS@
-all: Makefile
+compress_zlib_dir = @COMPRESS_ZLIB_DIR@
+data_dumper_dir = @DATA_DUMPER_DIR@
+digest_md5_dir = @DIGEST_MD5_DIR@
+math_interpolate_dir = @MATH_INTERPOLATE_DIR@
+rrdtool_dir = @RRDTOOL_DIR@
+storable_dir = @STORABLE_DIR@
+
+MAKE_COMPRESS_ZLIB = @MAKE_COMPRESS_ZLIB@
+MAKE_DATA_DUMPER = @MAKE_DATA_DUMPER@
+MAKE_DIGEST_MD5 = @MAKE_DIGEST_MD5@
+MAKE_MATH_INTERPOLATE = @MAKE_MATH_INTERPOLATE@
+MAKE_RRDTOOL = @MAKE_RRDTOOL@
+MAKE_STORABLE = @MAKE_STORABLE@
+MAKE_TARGETS = $(MAKE_COMPRESS_ZLIB) $(MAKE_DATA_DUMPER) $(MAKE_DIGEST_MD5) $(MAKE_MATH_INTERPOLATE) $(MAKE_RRDTOOL) $(MAKE_STORABLE)
+
+TEST_COMPRESS_ZLIB = @TEST_COMPRESS_ZLIB@
+TEST_DATA_DUMPER = @TEST_DATA_DUMPER@
+TEST_DIGEST_MD5 = @TEST_DIGEST_MD5@
+TEST_MATH_INTERPOLATE = @TEST_MATH_INTERPOLATE@
+TEST_RRDTOOL = @TEST_RRDTOOL@
+TEST_STORABLE = @TEST_STORABLE@
+TEST_TARGETS = $(TEST_COMPRESS_ZLIB) $(TEST_DATA_DUMPER) $(TEST_DIGEST_MD5) $(TEST_MATH_INTERPOLATE) $(TEST_RRDTOOL) $(TEST_STORABLE)
+
+INSTALL_PERL_COMPRESS_ZLIB = @INSTALL_PERL_COMPRESS_ZLIB@
+INSTALL_PERL_DATA_DUMPER = @INSTALL_PERL_DATA_DUMPER@
+INSTALL_PERL_DIGEST_MD5 = @INSTALL_PERL_DIGEST_MD5@
+INSTALL_PERL_MATH_INTERPOLATE = @INSTALL_PERL_MATH_INTERPOLATE@
+INSTALL_PERL_RRDTOOL = @INSTALL_PERL_RRDTOOL@
+INSTALL_PERL_STORABLE = @INSTALL_PERL_STORABLE@
+INSTALL_PERL_TARGETS = $(INSTALL_PERL_COMPRESS_ZLIB) $(INSTALL_PERL_DATA_DUMPER) $(INSTALL_PERL_DIGEST_MD5) $(INSTALL_PERL_MATH_INTERPOLATE) $(INSTALL_PERL_RRDTOOL) $(INSTALL_PERL_STORABLE)
+
+INSTALL_LIB_RRDTOOL = @INSTALL_LIB_RRDTOOL@
+INSTALL_LIB_TARGETS = $(INSTALL_LIB_RRDTOOL)
+
+CLEAN_COMPRESS_ZLIB = @CLEAN_COMPRESS_ZLIB@
+CLEAN_DATA_DUMPER = @CLEAN_DATA_DUMPER@
+CLEAN_DIGEST_MD5 = @CLEAN_DIGEST_MD5@
+CLEAN_MATH_INTERPOLATE = @CLEAN_MATH_INTERPOLATE@
+CLEAN_RRDTOOL = @CLEAN_RRDTOOL@
+CLEAN_STORABLE = @CLEAN_STORABLE@
+CLEAN_TARGETS = $(CLEAN_COMPRESS_ZLIB) $(CLEAN_DIGEST_MD5) $(CLEAN_MATH_INTERPOLATE) $(CLEAN_RRDTOOL) $(CLEAN_STORABLE)
+
+DISTCLEAN_COMPRESS_ZLIB = @DISTCLEAN_COMPRESS_ZLIB@
+DISTCLEAN_DATA_DUMPER = @DISTCLEAN_DATA_DUMPER@
+DISTCLEAN_DIGEST_MD5 = @DISTCLEAN_DIGEST_MD5@
+DISTCLEAN_MATH_INTERPOLATE = @DISTCLEAN_MATH_INTERPOLATE@
+DISTCLEAN_RRDTOOL = @DISTCLEAN_RRDTOOL@
+DISTCLEAN_STORABLE = @DISTCLEAN_STORABLE@
+DISTCLEAN_TARGETS = $(DISTCLEAN_COMPRESS_ZLIB) $(DISTCLEAN_DATA_DUMPER) $(DISTCLEAN_DIGEST_MD5) $(DISTCLEAN_MATH_INTERPOLATE) $(DISTCLEAN_RRDTOOL) $(DISTCLEAN_STORABLE)
+
+all: Makefile $(MAKE_TARGETS)
+
+make_compress_zlib: make_rrdtool_zlib $(compress_zlib_dir)/Makefile
+ cd $(compress_zlib_dir) && $(MAKE) OPTIMIZE="$(CFLAGS)"
-modules: Makefile $(MAKE_TARGETS)
+$(compress_zlib_dir)/Makefile: $(compress_zlib_dir)/Makefile.PL $(PERL)
+ cd $(compress_zlib_dir) && $(PERL) Makefile.PL
+
+make_data_dumper: $(data_dumper_dir)/Makefile
+ cd $(data_dumper_dir) && $(MAKE) OPTIMIZE="$(CFLAGS)"
+
+$(data_dumper_dir)/Makefile: $(data_dumper_dir)/Makefile.PL $(PERL)
+ cd $(data_dumper_dir) && $(PERL) Makefile.PL
make_digest_md5: $(digest_md5_dir)/Makefile
- (cd $(digest_md5_dir) && $(MAKE) OPTIMIZE="$(CFLAGS)")
+ cd $(digest_md5_dir) && $(MAKE) OPTIMIZE="$(CFLAGS)"
$(digest_md5_dir)/Makefile: $(digest_md5_dir)/Makefile.PL $(PERL)
- (cd $(digest_md5_dir) && $(PERL) Makefile.PL)
+ cd $(digest_md5_dir) && $(PERL) Makefile.PL
make_math_interpolate: $(math_interpolate_dir)/Makefile
- (cd $(math_interpolate_dir) && $(MAKE) OPTIMIZE="$(CFLAGS)")
+ cd $(math_interpolate_dir) && $(MAKE) OPTIMIZE="$(CFLAGS)"
$(math_interpolate_dir)/Makefile: $(math_interpolate_dir)/Makefile.PL $(PERL)
- (cd $(math_interpolate_dir) && $(PERL) Makefile.PL)
+ cd $(math_interpolate_dir) && $(PERL) Makefile.PL
make_rrdtool: $(rrdtool_dir)/Makefile $(PERL)
- (cd $(rrdtool_dir) && $(MAKE) CFLAGS="$(CFLAGS)")
+ cd $(rrdtool_dir) && $(MAKE) CFLAGS="$(CFLAGS)"
+
+make_rrdtool_zlib: $(rrdtool_dir)/Makefile $(PERL)
+ cd $(rrdtool_dir) && $(MAKE) CFLAGS="$(CFLAGS)" zlib-1.1.3/librrd_z.a
$(rrdtool_dir)/Makefile: $(rrdtool_dir)/Makefile.in
- (cd .. && ./configure @CONFIGURE_COMMAND_LINE@)
+ cd .. && ./configure @CONFIGURE_COMMAND_LINE@
make_storable: $(storable_dir)/Makefile
- (cd $(storable_dir) && $(MAKE) OPTIMIZE="$(CFLAGS)")
+ cd $(storable_dir) && $(MAKE) OPTIMIZE="$(CFLAGS)"
$(storable_dir)/Makefile: $(storable_dir)/Makefile.PL $(PERL)
- (cd $(storable_dir) && $(PERL) Makefile.PL)
+ cd $(storable_dir) && $(PERL) Makefile.PL
test:
test_modules: $(TEST_TARGETS)
+test_compress_zlib: make_rrdtool_zlib $(compress_zlib_dir)/Makefile
+ cd $(compress_zlib_dir) && $(MAKE) OPTIMIZE="$(CFLAGS)" test
+
+test_data_dumper: $(data_dumper_dir)/Makefile
+ cd $(data_dumper_dir) && $(MAKE) OPTIMIZE="$(CFLAGS)" test
+
test_digest_md5: $(digest_md5_dir)/Makefile
- (cd $(digest_md5_dir) && $(MAKE) CFLAGS="$(CFLAGS)" test)
+ cd $(digest_md5_dir) && $(MAKE) OPTIMIZE="$(CFLAGS)" test
test_math_interpolate: $(math_interpolate_dir)/Makefile
- (cd $(math_interpolate_dir) && $(MAKE) test)
+ cd $(math_interpolate_dir) && $(MAKE) test
test_rrdtool: make_rrdtool
- (cd $(rrdtool_dir)/perl-shared && $(MAKE) CFLAGS="$(CFLAGS)" test)
+ cd $(rrdtool_dir)/perl-shared && $(MAKE) CFLAGS="$(CFLAGS)" test
-test_storable: make_storable
- (cd $(storable_dir) && $(MAKE) CFLAGS="$(CFLAGS)" test)
+test_storable: $(storable_dir)/Makefile
+ cd $(storable_dir) && $(MAKE) OPTIMIZE="$(CFLAGS)" test
-install:
+install: $(INSTALL_LIB_TARGETS)
-install_modules: $(INSTALL_TARGETS)
+install_lib_rrdtool: make_rrdtool
+ cd $(rrdtool_dir)/src && $(MAKE) CFLAGS="$(CFLAGS)" install-libLTLIBRARIES
-install_digest_md5: make_digest_md5
- (cd $(digest_md5_dir) && $(MAKE) install)
+install_modules: $(INSTALL_PERL_TARGETS)
-install_math_interpolate: make_math_interpolate
- (cd $(math_interpolate_dir) && $(MAKE) install)
+install_perl_compress_zlib: make_compress_zlib
+ cd $(compress_zlib_dir) && $(MAKE) OPTIMIZE="$(CFLAGS)" install
-install_rrdtool: make_rrdtool
- (cd $(rrdtool_dir)/perl-shared && $(MAKE) install)
+install_perl_data_dumper: $(data_dumper_dir)/Makefile
+ cd $(data_dumper_dir) && $(MAKE) OPTIMIZE="$(CFLAGS)" install
-install_storable: make_storable
- (cd $(storable_dir) && $(MAKE) install)
+install_perl_digest_md5: $(digest_md5_dir)/Makefile
+ cd $(digest_md5_dir) && $(MAKE) OPTIMIZE="$(CFLAGS)" install
+
+install_perl_math_interpolate: $(math_interpolate_dir)/Makefile
+ cd $(math_interpolate_dir) && $(MAKE) OPTIMIZE="$(CFLAGS)" install
+
+install_perl_rrdtool: make_rrdtool
+ cd $(rrdtool_dir)/perl-shared && $(MAKE) OPTIMIZE="$(CFLAGS)" install
+
+install_perl_storable: $(storable_dir)/Makefile
+ cd $(storable_dir) && $(MAKE) OPTIMIZE="$(CFLAGS)" install
clean: $(CLEAN_TARGETS)
+clean_compress_zlib: clean_rrdtool
+ @if test -r $(compress_zlib_dir)/Makefile; then \
+ echo 'cd $(compress_zlib_dir) && $(MAKE) clean'; \
+ (cd $(compress_zlib_dir) && $(MAKE) clean); \
+ fi
+
clean_digest_md5:
@if test -r $(digest_md5_dir)/Makefile; then \
- echo '(cd $(digest_md5_dir); $(MAKE) clean'; \
- (cd $(digest_md5_dir); $(MAKE) clean); \
+ echo 'cd $(digest_md5_dir) && $(MAKE) clean'; \
+ (cd $(digest_md5_dir) && $(MAKE) clean); \
fi
clean_math_interpolate:
@if test -r $(math_interpolate_dir)/Makefile; then \
- echo '(cd $(math_interpolate_dir); $(MAKE) clean)'; \
- (cd $(math_interpolate_dir); $(MAKE) clean); \
+ echo 'cd $(math_interpolate_dir) && $(MAKE) clean'; \
+ (cd $(math_interpolate_dir) && $(MAKE) clean); \
fi
clean_rrdtool:
@@ -116,21 +166,23 @@
clean_storable:
@if test -r $(storable_dir)/Makefile; then \
- echo '(cd $(storable_dir); $(MAKE) clean)'; \
- (cd $(storable_dir); $(MAKE) clean); \
+ echo 'cd $(storable_dir) && $(MAKE) clean'; \
+ (cd $(storable_dir) && $(MAKE) clean); \
fi
distclean: $(DISTCLEAN_TARGETS)
+distclean_compress_zlib: clean_compress_zlib
+
distclean_digest_md5: clean_digest_md5
distclean_math_interpolate: clean_math_interpolate
distclean_rrdtool: clean_rrdtool
- (cd $(rrdtool_dir) && $(MAKE) distclean)
+ cd $(rrdtool_dir) && $(MAKE) distclean
distclean_storable: clean_storable
Makefile: Makefile.in
- (cd ..; ./config.status)
+ cd .. && CONFIG_FILES=packages/Makefile ./config.status
$(MAKE)
Added: trunk/orca/packages/Data-Dumper-2.101/Dumper.html
==============================================================================
--- trunk/orca/packages/Data-Dumper-2.101/Dumper.html (original)
+++ trunk/orca/packages/Data-Dumper-2.101/Dumper.html Sat Jul 13 19:22:29 2002
@@ -0,0 +1,495 @@
+<HTML>
+<HEAD>
+<TITLE>Data::Dumper - stringified perl data structures, suitable for both printing and C<eval></TITLE>
+<LINK REV="made" HREF="mailto:gsar at umich.edu">
+</HEAD>
+
+<BODY>
+
+<!-- INDEX BEGIN -->
+
+<UL>
+
+ <LI><A HREF="#NAME">NAME</A>
+ <LI><A HREF="#SYNOPSIS">SYNOPSIS</A>
+ <LI><A HREF="#DESCRIPTION">DESCRIPTION</A>
+ <UL>
+
+ <LI><A HREF="#Methods">Methods</A>
+ <LI><A HREF="#Functions">Functions</A>
+ <LI><A HREF="#Configuration_Variables_or_Metho">Configuration Variables or Methods</A>
+ <LI><A HREF="#Exports">Exports</A>
+ </UL>
+
+ <LI><A HREF="#EXAMPLES">EXAMPLES</A>
+ <LI><A HREF="#BUGS">BUGS</A>
+ <LI><A HREF="#AUTHOR">AUTHOR</A>
+ <LI><A HREF="#VERSION">VERSION</A>
+ <LI><A HREF="#SEE_ALSO">SEE ALSO</A>
+</UL>
+<!-- INDEX END -->
+
+<HR>
+<P>
+<H1><A NAME="NAME">NAME</A></H1>
+<P>
+Data::Dumper - stringified perl data structures, suitable for both printing
+and <CODE>eval</CODE>
+
+
+
+<P>
+<HR>
+<H1><A NAME="SYNOPSIS">SYNOPSIS</A></H1>
+<P>
+<PRE> use Data::Dumper;
+</PRE>
+<P>
+<PRE> # simple procedural interface
+ print Dumper($foo, $bar);
+</PRE>
+<P>
+<PRE> # extended usage with names
+ print Data::Dumper->Dump([$foo, $bar], [qw(foo *ary)]);
+</PRE>
+<P>
+<PRE> # configuration variables
+ {
+ local $Data::Dump::Purity = 1;
+ eval Data::Dumper->Dump([$foo, $bar], [qw(foo *ary)]);
+ }
+</PRE>
+<P>
+<PRE> # OO usage
+ $d = Data::Dumper->new([$foo, $bar], [qw(foo *ary)]);
+ ...
+ print $d->Dump;
+ ...
+ $d->Purity(1)->Terse(1)->Deepcopy(1);
+ eval $d->Dump;
+</PRE>
+<P>
+<HR>
+<H1><A NAME="DESCRIPTION">DESCRIPTION</A></H1>
+<P>
+Given a list of scalars or reference variables, writes out their contents
+in perl syntax. The references can also be objects. The contents of each
+variable is output in a single Perl statement. Handles self-referential
+structures correctly.
+
+<P>
+The return value can be <CODE>eval</CODE>ed to get back an identical copy of the original reference structure.
+
+<P>
+Any references that are the same as one of those passed in will be named
+<CODE>$VAR</CODE><EM>n</EM> (where <EM>n</EM> is a numeric suffix), and other duplicate references to substructures
+within <CODE>$VAR</CODE><EM>n</EM> will be appropriately labeled using arrow notation. You can specify names
+for individual values to be dumped if you use the <CODE>Dump()</CODE> method, or you can change the default <CODE>$VAR</CODE> prefix to something else. See <CODE>$Data::Dumper::Varname</CODE> and <CODE>$Data::Dumper::Terse</CODE>
+below.
+
+<P>
+The default output of self-referential structures can be <CODE>eval</CODE>ed, but the nested references to <CODE>$VAR</CODE><EM>n</EM> will be undefined, since a recursive structure cannot be constructed using
+one Perl statement. You should set the
+<CODE>Purity</CODE> flag to 1 to get additional statements that will correctly fill in these
+references.
+
+<P>
+In the extended usage form, the references to be dumped can be given
+user-specified names. If a name begins with a <CODE>*</CODE>, the output will describe the dereferenced type of the supplied reference
+for hashes and arrays, and coderefs. Output of names will be avoided where
+possible if the <CODE>Terse</CODE> flag is set.
+
+<P>
+In many cases, methods that are used to set the internal state of the
+object will return the object itself, so method calls can be conveniently
+chained together.
+
+<P>
+Several styles of output are possible, all controlled by setting the <CODE>Indent</CODE> flag. See <A HREF="#Configuration_Variables_or_Metho">Configuration Variables or Methods</A> below for details.
+
+<P>
+<HR>
+<H2><A NAME="Methods">Methods</A></H2>
+<DL>
+<DT><STRONG><A NAME="item_PACKAGE">PACKAGE->new(ARRAYREF [, ARRAYREF])</A></STRONG><DD>
+<P>
+Returns a newly created <CODE>Data::Dumper</CODE> object. The first argument is an anonymous array of values to be dumped.
+The optional second argument is an anonymous array of names for the values.
+The names need not have a leading
+<CODE>$</CODE> sign, and must be comprised of alphanumeric characters. You can begin a
+name with a <CODE>*</CODE> to specify that the dereferenced type must be dumped instead of the
+reference itself, for ARRAY and HASH references.
+
+<P>
+The prefix specified by <CODE>$Data::Dumper::Varname</CODE> will be used with a numeric suffix if the name for a value is undefined.
+
+<P>
+Data::Dumper will catalog all references encountered while dumping the
+values. Cross-references (in the form of names of substructures in perl
+syntax) will be inserted at all possible points, preserving any structural
+interdependencies in the original set of values. Structure traversal is
+depth-first, and proceeds in order from the first supplied value to the
+last.
+
+<DT><STRONG><A NAME="item__OBJ_Dump">$OBJ->Dump or PACKAGE->Dump(ARRAYREF [, ARRAYREF])</A></STRONG><DD>
+<P>
+Returns the stringified form of the values stored in the object (preserving
+the order in which they were supplied to <CODE>new</CODE>), subject to the configuration options below. In an array context, it
+returns a list of strings corresponding to the supplied values.
+
+<P>
+The second form, for convenience, simply calls the <CODE>new</CODE> method on its arguments before dumping the object immediately.
+
+<DT><STRONG><A NAME="item__OBJ_Dumpxs">$OBJ->Dumpxs or PACKAGE->Dumpxs(ARRAYREF [, ARRAYREF])</A></STRONG><DD>
+<P>
+This method is available if you were able to compile and install the XSUB
+extension to <CODE>Data::Dumper</CODE>. It is exactly identical to the <CODE>Dump</CODE> method above, only about 4 to 5 times faster, since it is written entirely
+in C.
+
+<DT><STRONG><A NAME="item__OBJ_Seen_HASHREF_">$OBJ->Seen([HASHREF])</A></STRONG><DD>
+<P>
+Queries or adds to the internal table of already encountered references.
+You must use <CODE>Reset</CODE> to explicitly clear the table if needed. Such references are not dumped;
+instead, their names are inserted wherever they are encountered
+subsequently. This is useful especially for properly dumping subroutine
+references.
+
+<P>
+Expects an anonymous hash of name => value pairs. Same rules apply for
+names as in <CODE>new</CODE>. If no argument is supplied, will return the ``seen'' list of name =>
+value pairs, in an array context. Otherwise, returns the object itself.
+
+<DT><STRONG><A NAME="item__OBJ_Values_ARRAYREF_">$OBJ->Values([ARRAYREF])</A></STRONG><DD>
+<P>
+Queries or replaces the internal array of values that will be dumped. When
+called without arguments, returns the values. Otherwise, returns the object
+itself.
+
+<DT><STRONG><A NAME="item__OBJ_Names_ARRAYREF_">$OBJ->Names([ARRAYREF])</A></STRONG><DD>
+<P>
+Queries or replaces the internal array of user supplied names for the
+values that will be dumped. When called without arguments, returns the
+names. Otherwise, returns the object itself.
+
+<DT><STRONG><A NAME="item__OBJ_Reset">$OBJ->Reset</A></STRONG><DD>
+<P>
+Clears the internal table of ``seen'' references and returns the object
+itself.
+
+</DL>
+<P>
+<HR>
+<H2><A NAME="Functions">Functions</A></H2>
+<DL>
+<DT><STRONG><A NAME="item_Dumper">Dumper(LIST)</A></STRONG><DD>
+<P>
+Returns the stringified form of the values in the list, subject to the
+configuration options below. The values will be named <CODE>$VAR</CODE><EM>n</EM> in the output, where <EM>n</EM> is a numeric suffix. Will return a list of strings in an array context.
+
+<DT><STRONG><A NAME="item_DumperX">DumperX(LIST)</A></STRONG><DD>
+<P>
+Identical to the <A HREF="#item_Dumper">Dumper()</A> function above, but this calls the XSUB implementation. Only available if
+you were able to compile and install the XSUB extensions in <CODE>Data::Dumper</CODE>.
+
+</DL>
+<P>
+<HR>
+<H2><A NAME="Configuration_Variables_or_Metho">Configuration Variables or Methods</A></H2>
+<P>
+Several configuration variables can be used to control the kind of output
+generated when using the procedural interface. These variables are usually
+<CODE>local</CODE>ized in a block so that other parts of the code are not affected by the
+change.
+
+<P>
+These variables determine the default state of the object created by
+calling the <CODE>new</CODE> method, but cannot be used to alter the state of the object thereafter. The
+equivalent method names should be used instead to query or set the internal
+state of the object.
+
+<P>
+The method forms return the object itself when called with arguments, so
+that they can be chained together nicely.
+
+<DL>
+<DT><STRONG><A NAME="item__Data_Dumper_Indent">$Data::Dumper::Indent or $OBJ->Indent([NEWVAL])</A></STRONG><DD>
+<P>
+Controls the style of indentation. It can be set to 0, 1, 2 or 3. Style 0
+spews output without any newlines, indentation, or spaces between list
+items. It is the most compact format possible that can still be called
+valid perl. Style 1 outputs a readable form with newlines but no fancy
+indentation (each level in the structure is simply indented by a fixed
+amount of whitespace). Style 2 (the default) outputs a very readable form
+which takes into account the length of hash keys (so the hash value lines
+up). Style 3 is like style 2, but also annotates the elements of arrays
+with their index (but the comment is on its own line, so array output
+consumes twice the number of lines). Style 2 is the default.
+
+<DT><STRONG><A NAME="item__Data_Dumper_Purity">$Data::Dumper::Purity or $OBJ->Purity([NEWVAL])</A></STRONG><DD>
+<P>
+Controls the degree to which the output can be <CODE>eval</CODE>ed to recreate the supplied reference structures. Setting it to 1 will
+output additional perl statements that will correctly recreate nested
+references. The default is 0.
+
+<DT><STRONG><A NAME="item__Data_Dumper_Pad">$Data::Dumper::Pad or $OBJ->Pad([NEWVAL])</A></STRONG><DD>
+<P>
+Specifies the string that will be prefixed to every line of the output.
+Empty string by default.
+
+<DT><STRONG><A NAME="item__Data_Dumper_Varname">$Data::Dumper::Varname or $OBJ->Varname([NEWVAL])</A></STRONG><DD>
+<P>
+Contains the prefix to use for tagging variable names in the output. The
+default is ``VAR''.
+
+<DT><STRONG><A NAME="item__Data_Dumper_Useqq">$Data::Dumper::Useqq or $OBJ->Useqq([NEWVAL])</A></STRONG><DD>
+<P>
+When set, enables the use of double quotes for representing string values.
+Whitespace other than space will be represented as <CODE>[\n\t\r]</CODE>, ``unsafe'' characters will be backslashed, and unprintable characters
+will be output as quoted octal integers. Since setting this variable
+imposes a performance penalty, the default is 0. The <CODE>Dumpxs()</CODE> method does not honor this flag yet.
+
+<DT><STRONG><A NAME="item__Data_Dumper_Terse">$Data::Dumper::Terse or $OBJ->Terse([NEWVAL])</A></STRONG><DD>
+<P>
+When set, Data::Dumper will emit single, non-self-referential values as
+atoms/terms rather than statements. This means that the <CODE>$VAR</CODE><EM>n</EM> names will be avoided where possible, but be advised that such output may
+not always be parseable by <CODE>eval</CODE>.
+
+<DT><STRONG><A NAME="item__Data_Dumper_Freezer">$Data::Dumper::Freezer or $OBJ->Freezer([NEWVAL])</A></STRONG><DD>
+<P>
+Can be set to a method name, or to an empty string to disable the feature.
+Data::Dumper will invoke that method via the object before attempting to
+stringify it. This method can alter the contents of the object (if, for
+instance, it contains data allocated from C), and even rebless it in a
+different package. The client is responsible for making sure the specified
+method can be called via the object, and that the object ends up containing
+only perl data types after the method has been called. Defaults to an empty
+string.
+
+<DT><STRONG><A NAME="item__Data_Dumper_Toaster">$Data::Dumper::Toaster or $OBJ->Toaster([NEWVAL])</A></STRONG><DD>
+<P>
+Can be set to a method name, or to an empty string to disable the feature.
+Data::Dumper will emit a method call for any objects that are to be dumped
+using the syntax <CODE>bless(DATA, CLASS)->METHOD()</CODE>. Note that this means that the method specified
+will have to perform any modifications required on the object (like
+creating new state within it, and/or reblessing it in a different package)
+and then return it. The client is responsible for making sure the method
+can be called via the object, and that it returns a valid object. Defaults
+to an empty string.
+
+<DT><STRONG><A NAME="item__Data_Dumper_Deepcopy">$Data::Dumper::Deepcopy or $OBJ->Deepcopy([NEWVAL])</A></STRONG><DD>
+<P>
+Can be set to a boolean value to enable deep copies of structures.
+Cross-referencing will then only be done when absolutely essential (i.e.,
+to break reference cycles). Default is 0.
+
+<DT><STRONG><A NAME="item__Data_Dumper_Quotekeys">$Data::Dumper::Quotekeys or $OBJ->Quotekeys([NEWVAL])</A></STRONG><DD>
+<P>
+Can be set to a boolean value to control whether hash keys are quoted. A
+false value will avoid quoting hash keys when it looks like a simple
+string. Default is 1, which will always enclose hash keys in quotes.
+
+<DT><STRONG><A NAME="item__Data_Dumper_Bless">$Data::Dumper::Bless or $OBJ->Bless([NEWVAL])</A></STRONG><DD>
+<P>
+Can be set to a string that specifies an alternative to the <CODE>bless</CODE>
+builtin operator used to create objects. A function with the specified name
+should exist, and should accept the same arguments as the builtin. Default
+is <CODE>bless</CODE>.
+
+</DL>
+<P>
+<HR>
+<H2><A NAME="Exports">Exports</A></H2>
+<DL>
+<DT><STRONG><A NAME="item_Dumper">Dumper</A></STRONG><DD>
+</DL>
+<P>
+<HR>
+<H1><A NAME="EXAMPLES">EXAMPLES</A></H1>
+<P>
+Run these code snippets to get a quick feel for the behavior of this
+module. When you are through with these examples, you may want to add or
+change the various configuration variables described above, to see their
+behavior. (See the testsuite in the Data::Dumper distribution for more
+examples.)
+
+<P>
+<PRE> use Data::Dumper;
+</PRE>
+<P>
+<PRE> package Foo;
+ sub new {bless {'a' => 1, 'b' => sub { return "foo" }}, $_[0]};
+</PRE>
+<P>
+<PRE> package Fuz; # a weird REF-REF-SCALAR object
+ sub new {bless \($_ = \ 'fu\'z'), $_[0]};
+</PRE>
+<P>
+<PRE> package main;
+ $foo = Foo->new;
+ $fuz = Fuz->new;
+ $boo = [ 1, [], "abcd", \*foo,
+ {1 => 'a', 023 => 'b', 0x45 => 'c'},
+ \\"p\q\'r", $foo, $fuz];
+
+ ########
+ # simple usage
+ ########
+</PRE>
+<P>
+<PRE> $bar = eval(Dumper($boo));
+ print($@) if $@;
+ print Dumper($boo), Dumper($bar); # pretty print (no array indices)
+</PRE>
+<P>
+<PRE> $Data::Dumper::Terse = 1; # don't output names where feasible
+ $Data::Dumper::Indent = 0; # turn off all pretty print
+ print Dumper($boo), "\n";
+</PRE>
+<P>
+<PRE> $Data::Dumper::Indent = 1; # mild pretty print
+ print Dumper($boo);
+</PRE>
+<P>
+<PRE> $Data::Dumper::Indent = 3; # pretty print with array indices
+ print Dumper($boo);
+</PRE>
+<P>
+<PRE> $Data::Dumper::Useqq = 1; # print strings in double quotes
+ print Dumper($boo);
+
+
+ ########
+ # recursive structures
+ ########
+
+ @c = ('c');
+ $c = \@c;
+ $b = {};
+ $a = [1, $b, $c];
+ $b->{a} = $a;
+ $b->{b} = $a->[1];
+ $b->{c} = $a->[2];
+ print Data::Dumper->Dump([$a,$b,$c], [qw(a b c)]);
+
+
+ $Data::Dumper::Purity = 1; # fill in the holes for eval
+ print Data::Dumper->Dump([$a, $b], [qw(*a b)]); # print as @a
+ print Data::Dumper->Dump([$b, $a], [qw(*b a)]); # print as %b
+
+
+ $Data::Dumper::Deepcopy = 1; # avoid cross-refs
+ print Data::Dumper->Dump([$b, $a], [qw(*b a)]);
+
+
+ $Data::Dumper::Purity = 0; # avoid cross-refs
+ print Data::Dumper->Dump([$b, $a], [qw(*b a)]);
+
+
+ ########
+ # object-oriented usage
+ ########
+
+ $d = Data::Dumper->new([$a,$b], [qw(a b)]);
+ $d->Seen({'*c' => $c}); # stash a ref without printing it
+ $d->Indent(3);
+ print $d->Dump;
+ $d->Reset->Purity(0); # empty the seen cache
+ print join "----\n", $d->Dump;
+
+
+ ########
+ # persistence
+ ########
+
+ package Foo;
+ sub new { bless { state => 'awake' }, shift }
+ sub Freeze {
+ my $s = shift;
+ print STDERR "preparing to sleep\n";
+ $s->{state} = 'asleep';
+ return bless $s, 'Foo::ZZZ';
+ }
+
+ package Foo::ZZZ;
+ sub Thaw {
+ my $s = shift;
+ print STDERR "waking up\n";
+ $s->{state} = 'awake';
+ return bless $s, 'Foo';
+ }
+
+ package Foo;
+ use Data::Dumper;
+ $a = Foo->new;
+ $b = Data::Dumper->new([$a], ['c']);
+ $b->Freezer('Freeze');
+ $b->Toaster('Thaw');
+ $c = $b->Dump;
+ print $c;
+ $d = eval $c;
+ print Data::Dumper->Dump([$d], ['d']);
+
+
+ ########
+ # symbol substitution (useful for recreating CODE refs)
+ ########
+
+ sub foo { print "foo speaking\n" }
+ *other = \&foo;
+ $bar = [ \&other ];
+ $d = Data::Dumper->new([\&other,$bar],['*other','bar']);
+ $d->Seen({ '*foo' => \&foo });
+ print $d->Dump;
+</PRE>
+<P>
+<HR>
+<H1><A NAME="BUGS">BUGS</A></H1>
+<P>
+Due to limitations of Perl subroutine call semantics, you cannot pass an
+array or hash. Prepend it with a <CODE>\</CODE> to pass its reference instead. This will be remedied in time, with the
+arrival of prototypes in later versions of Perl. For now, you need to use
+the extended usage form, and prepend the name with a <CODE>*</CODE> to output it as a hash or array.
+
+<P>
+<CODE>Data::Dumper</CODE> cheats with CODE references. If a code reference is encountered in the
+structure being processed, an anonymous subroutine that contains the string
+'``DUMMY''' will be inserted in its place, and a warning will be printed if <CODE>Purity</CODE> is set. You can <CODE>eval</CODE> the result, but bear in mind that the anonymous sub that gets created is
+just a placeholder. Someday, perl will have a switch to cache-on-demand the
+string representation of a compiled piece of code, I hope. If you have
+prior knowledge of all the code refs that your data structures are likely
+to have, you can use the <CODE>Seen</CODE> method to pre-seed the internal reference table and make the dumped output
+point to them, instead. See <A HREF="#EXAMPLES">EXAMPLES</A>
+above.
+
+<P>
+The <CODE>Useqq</CODE> flag is not honored by <CODE>Dumpxs()</CODE> (it always outputs strings in single quotes).
+
+<P>
+SCALAR objects have the weirdest looking <CODE>bless</CODE> workaround.
+
+<P>
+<HR>
+<H1><A NAME="AUTHOR">AUTHOR</A></H1>
+<P>
+Gurusamy Sarathy <A HREF="mailto:gsar at umich.edu">gsar at umich.edu</A>
+
+<P>
+Copyright (c) 1996-98 Gurusamy Sarathy. All rights reserved. This program
+is free software; you can redistribute it and/or modify it under the same
+terms as Perl itself.
+
+<P>
+<HR>
+<H1><A NAME="VERSION">VERSION</A></H1>
+<P>
+Version 2.101 (30 Apr 1999)
+
+<P>
+<HR>
+<H1><A NAME="SEE_ALSO">SEE ALSO</A></H1>
+<P>
+<CODE>perl(1)</CODE>
+
+</BODY>
+
+</HTML>
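
The Dumper.html page added above documents Data::Dumper's two calling
styles and its configuration switches at some length. As a condensed,
illustrative sketch (not taken from this commit; the sample data is
invented), the settings described there are typically used like this:

    # Condensed Data::Dumper usage (illustrative; sample data invented).
    use Data::Dumper;

    my $config = { host => 'orca1', ports => [ 8080, 8443 ] };

    # Procedural style: package variables, localized so the rest of
    # the program keeps the defaults.
    {
        local $Data::Dumper::Indent = 1;   # newlines, no key alignment
        local $Data::Dumper::Terse  = 1;   # omit the leading $VAR1 =
        print Dumper($config);
    }

    # Object style: configuration methods return the object, so they chain.
    my $d = Data::Dumper->new([$config], ['config']);
    print $d->Purity(1)->Quotekeys(0)->Dump;

Both forms are covered in the EXAMPLES section of the page above;
Dumpxs/DumperX are the faster XSUB equivalents when that extension is
built.
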
Added: trunk/orca/packages/Data-Dumper-2.101/MANIFEST.NOXSUB
==============================================================================
--- trunk/orca/packages/Data-Dumper-2.101/MANIFEST.NOXSUB (original)
+++ trunk/orca/packages/Data-Dumper-2.101/MANIFEST.NOXSUB Sat Jul 13 19:22:29 2002
@@ -0,0 +1,12 @@
+README
+MANIFEST
+MANIFEST.XSUB
+Changes
+Todo
+Makefile.PL
+Dumper.pm
+Dumper.pm.XSUB
+Dumper.xs.XSUB
+Dumper.html
+t/dumper.t
+t/overload.t
Added: trunk/orca/packages/Data-Dumper-2.101/t/overload.t
==============================================================================
--- trunk/orca/packages/Data-Dumper-2.101/t/overload.t (original)
+++ trunk/orca/packages/Data-Dumper-2.101/t/overload.t Sat Jul 13 19:22:29 2002
@@ -0,0 +1,24 @@
+#!./perl -w
+
+use Data::Dumper;
+
+print "1..1\n";
+
+package Foo;
+use overload '""' => 'as_string';
+
+sub new { bless { foo => "bar" }, shift }
+sub as_string { "%%%%" }
+
+package main;
+
+my $f = Foo->new;
+
+print "#\$f=$f\n";
+
+$_ = Dumper($f);
+s/^/#/mg;
+print $_;
+
+print "not " unless /bar/ && /Foo/;
+print "ok 1\n";
Added: trunk/orca/packages/Data-Dumper-2.101/t/dumper.t
==============================================================================
--- trunk/orca/packages/Data-Dumper-2.101/t/dumper.t (original)
+++ trunk/orca/packages/Data-Dumper-2.101/t/dumper.t Sat Jul 13 19:22:29 2002
@@ -0,0 +1,699 @@
+#!./perl -w
+#
+# testsuite for Data::Dumper
+#
+
+use Data::Dumper;
+
+$Data::Dumper::Pad = "#";
+my $TMAX;
+my $XS;
+my $TNUM = 0;
+my $WANT = '';
+
+sub TEST {
+ my $string = shift;
+ my $t = eval $string;
+ ++$TNUM;
+ print( ($t eq $WANT and not $@) ? "ok $TNUM\n"
+ : "not ok $TNUM\n--Expected--\n$WANT\n--Got--\n$@$t\n");
+
+ ++$TNUM;
+ eval "$t";
+ print $@ ? "not ok $TNUM\n# \$@ says: $@\n" : "ok $TNUM\n";
+
+ $t = eval $string;
+ ++$TNUM;
+ print( ($t eq $WANT and not $@) ? "ok $TNUM\n"
+ : "not ok $TNUM\n--Expected--\n$WANT\n--Got--\n$@$t\n");
+}
+
+if (defined &Data::Dumper::Dumpxs) {
+ print "### XS extension loaded, will run XS tests\n";
+ $TMAX = 162; $XS = 1;
+}
+else {
+ print "### XS extensions not loaded, will NOT run XS tests\n";
+ $TMAX = 81; $XS = 0;
+}
+
+print "1..$TMAX\n";
+
+#############
+#############
+
+@c = ('c');
+$c = \@c;
+$b = {};
+$a = [1, $b, $c];
+$b->{a} = $a;
+$b->{b} = $a->[1];
+$b->{c} = $a->[2];
+
+############# 1
+##
+$WANT = <<'EOT';
+#$a = [
+# 1,
+# {
+# 'a' => $a,
+# 'b' => $a->[1],
+# 'c' => [
+# 'c'
+# ]
+# },
+# $a->[1]{'c'}
+# ];
+#$b = $a->[1];
+#$c = $a->[1]{'c'};
+EOT
+
+TEST q(Data::Dumper->Dump([$a,$b,$c], [qw(a b c)]));
+TEST q(Data::Dumper->Dumpxs([$a,$b,$c], [qw(a b c)])) if $XS;
+
+
+############# 7
+##
+$WANT = <<'EOT';
+#@a = (
+# 1,
+# {
+# 'a' => [],
+# 'b' => {},
+# 'c' => [
+# 'c'
+# ]
+# },
+# []
+# );
+#$a[1]{'a'} = \@a;
+#$a[1]{'b'} = $a[1];
+#$a[2] = $a[1]{'c'};
+#$b = $a[1];
+EOT
+
+$Data::Dumper::Purity = 1; # fill in the holes for eval
+TEST q(Data::Dumper->Dump([$a, $b], [qw(*a b)])); # print as @a
+TEST q(Data::Dumper->Dumpxs([$a, $b], [qw(*a b)])) if $XS;
+
+############# 13
+##
+$WANT = <<'EOT';
+#%b = (
+# 'a' => [
+# 1,
+# {},
+# [
+# 'c'
+# ]
+# ],
+# 'b' => {},
+# 'c' => []
+# );
+#$b{'a'}[1] = \%b;
+#$b{'b'} = \%b;
+#$b{'c'} = $b{'a'}[2];
+#$a = $b{'a'};
+EOT
+
+TEST q(Data::Dumper->Dump([$b, $a], [qw(*b a)])); # print as %b
+TEST q(Data::Dumper->Dumpxs([$b, $a], [qw(*b a)])) if $XS;
+
+############# 19
+##
+$WANT = <<'EOT';
+#$a = [
+# 1,
+# {
+# 'a' => [],
+# 'b' => {},
+# 'c' => []
+# },
+# []
+#];
+#$a->[1]{'a'} = $a;
+#$a->[1]{'b'} = $a->[1];
+#$a->[1]{'c'} = \@c;
+#$a->[2] = \@c;
+#$b = $a->[1];
+EOT
+
+$Data::Dumper::Indent = 1;
+TEST q(
+ $d = Data::Dumper->new([$a,$b], [qw(a b)]);
+ $d->Seen({'*c' => $c});
+ $d->Dump;
+ );
+if ($XS) {
+ TEST q(
+ $d = Data::Dumper->new([$a,$b], [qw(a b)]);
+ $d->Seen({'*c' => $c});
+ $d->Dumpxs;
+ );
+}
+
+
+############# 25
+##
+$WANT = <<'EOT';
+#$a = [
+# #0
+# 1,
+# #1
+# {
+# a => $a,
+# b => $a->[1],
+# c => [
+# #0
+# 'c'
+# ]
+# },
+# #2
+# $a->[1]{c}
+# ];
+#$b = $a->[1];
+EOT
+
+$d->Indent(3);
+$d->Purity(0)->Quotekeys(0);
+TEST q( $d->Reset; $d->Dump );
+
+TEST q( $d->Reset; $d->Dumpxs ) if $XS;
+
+############# 31
+##
+$WANT = <<'EOT';
+#$VAR1 = [
+# 1,
+# {
+# 'a' => [],
+# 'b' => {},
+# 'c' => [
+# 'c'
+# ]
+# },
+# []
+#];
+#$VAR1->[1]{'a'} = $VAR1;
+#$VAR1->[1]{'b'} = $VAR1->[1];
+#$VAR1->[2] = $VAR1->[1]{'c'};
+EOT
+
+TEST q(Dumper($a));
+TEST q(Data::Dumper::DumperX($a)) if $XS;
+
+############# 37
+##
+$WANT = <<'EOT';
+#[
+# 1,
+# {
+# a => $VAR1,
+# b => $VAR1->[1],
+# c => [
+# 'c'
+# ]
+# },
+# $VAR1->[1]{c}
+#]
+EOT
+
+{
+ local $Data::Dumper::Purity = 0;
+ local $Data::Dumper::Quotekeys = 0;
+ local $Data::Dumper::Terse = 1;
+ TEST q(Dumper($a));
+ TEST q(Data::Dumper::DumperX($a)) if $XS;
+}
+
+
+############# 43
+##
+$WANT = <<'EOT';
+#$VAR1 = {
+# "abc\0'\efg" => "mno\0"
+#};
+EOT
+
+$foo = { "abc\000\'\efg" => "mno\000" };
+{
+ local $Data::Dumper::Useqq = 1;
+ TEST q(Dumper($foo));
+}
+
+ $WANT = <<"EOT";
+#\$VAR1 = {
+# 'abc\0\\'\efg' => 'mno\0'
+#};
+EOT
+
+ {
+ local $Data::Dumper::Useqq = 1;
+ TEST q(Data::Dumper::DumperX($foo)) if $XS; # cheat
+ }
+
+
+
+#############
+#############
+
+{
+ package main;
+ use Data::Dumper;
+ $foo = 5;
+ @foo = (10,\*foo);
+ %foo = (a=>1,b=>\$foo,c=>\@foo);
+ $foo{d} = \%foo;
+ $foo[2] = \%foo;
+
+############# 49
+##
+ $WANT = <<'EOT';
+#$foo = \*::foo;
+#*::foo = \5;
+#*::foo = [
+# #0
+# 10,
+# #1
+# '',
+# #2
+# {
+# 'a' => 1,
+# 'b' => '',
+# 'c' => [],
+# 'd' => {}
+# }
+# ];
+#*::foo{ARRAY}->[1] = $foo;
+#*::foo{ARRAY}->[2]{'b'} = *::foo{SCALAR};
+#*::foo{ARRAY}->[2]{'c'} = *::foo{ARRAY};
+#*::foo{ARRAY}->[2]{'d'} = *::foo{ARRAY}->[2];
+#*::foo = *::foo{ARRAY}->[2];
+#@bar = @{*::foo{ARRAY}};
+#%baz = %{*::foo{ARRAY}->[2]};
+EOT
+
+ $Data::Dumper::Purity = 1;
+ $Data::Dumper::Indent = 3;
+ TEST q(Data::Dumper->Dump([\\*foo, \\@foo, \\%foo], ['*foo', '*bar', '*baz']));
+ TEST q(Data::Dumper->Dumpxs([\\*foo, \\@foo, \\%foo], ['*foo', '*bar', '*baz'])) if $XS;
+
+############# 55
+##
+ $WANT = <<'EOT';
+#$foo = \*::foo;
+#*::foo = \5;
+#*::foo = [
+# 10,
+# '',
+# {
+# 'a' => 1,
+# 'b' => '',
+# 'c' => [],
+# 'd' => {}
+# }
+#];
+#*::foo{ARRAY}->[1] = $foo;
+#*::foo{ARRAY}->[2]{'b'} = *::foo{SCALAR};
+#*::foo{ARRAY}->[2]{'c'} = *::foo{ARRAY};
+#*::foo{ARRAY}->[2]{'d'} = *::foo{ARRAY}->[2];
+#*::foo = *::foo{ARRAY}->[2];
+#$bar = *::foo{ARRAY};
+#$baz = *::foo{ARRAY}->[2];
+EOT
+
+ $Data::Dumper::Indent = 1;
+ TEST q(Data::Dumper->Dump([\\*foo, \\@foo, \\%foo], ['foo', 'bar', 'baz']));
+ TEST q(Data::Dumper->Dumpxs([\\*foo, \\@foo, \\%foo], ['foo', 'bar', 'baz'])) if $XS;
+
+############# 61
+##
+ $WANT = <<'EOT';
+#@bar = (
+# 10,
+# \*::foo,
+# {}
+#);
+#*::foo = \5;
+#*::foo = \@bar;
+#*::foo = {
+# 'a' => 1,
+# 'b' => '',
+# 'c' => [],
+# 'd' => {}
+#};
+#*::foo{HASH}->{'b'} = *::foo{SCALAR};
+#*::foo{HASH}->{'c'} = \@bar;
+#*::foo{HASH}->{'d'} = *::foo{HASH};
+#$bar[2] = *::foo{HASH};
+#%baz = %{*::foo{HASH}};
+#$foo = $bar[1];
+EOT
+
+ TEST q(Data::Dumper->Dump([\\@foo, \\%foo, \\*foo], ['*bar', '*baz', '*foo']));
+ TEST q(Data::Dumper->Dumpxs([\\@foo, \\%foo, \\*foo], ['*bar', '*baz', '*foo'])) if $XS;
+
+############# 67
+##
+ $WANT = <<'EOT';
+#$bar = [
+# 10,
+# \*::foo,
+# {}
+#];
+#*::foo = \5;
+#*::foo = $bar;
+#*::foo = {
+# 'a' => 1,
+# 'b' => '',
+# 'c' => [],
+# 'd' => {}
+#};
+#*::foo{HASH}->{'b'} = *::foo{SCALAR};
+#*::foo{HASH}->{'c'} = $bar;
+#*::foo{HASH}->{'d'} = *::foo{HASH};
+#$bar->[2] = *::foo{HASH};
+#$baz = *::foo{HASH};
+#$foo = $bar->[1];
+EOT
+
+ TEST q(Data::Dumper->Dump([\\@foo, \\%foo, \\*foo], ['bar', 'baz', 'foo']));
+ TEST q(Data::Dumper->Dumpxs([\\@foo, \\%foo, \\*foo], ['bar', 'baz', 'foo'])) if $XS;
+
+############# 73
+##
+ $WANT = <<'EOT';
+#$foo = \*::foo;
+#@bar = (
+# 10,
+# $foo,
+# {
+# a => 1,
+# b => \5,
+# c => \@bar,
+# d => $bar[2]
+# }
+#);
+#%baz = %{$bar[2]};
+EOT
+
+ $Data::Dumper::Purity = 0;
+ $Data::Dumper::Quotekeys = 0;
+ TEST q(Data::Dumper->Dump([\\*foo, \\@foo, \\%foo], ['*foo', '*bar', '*baz']));
+ TEST q(Data::Dumper->Dumpxs([\\*foo, \\@foo, \\%foo], ['*foo', '*bar', '*baz'])) if $XS;
+
+############# 79
+##
+ $WANT = <<'EOT';
+#$foo = \*::foo;
+#$bar = [
+# 10,
+# $foo,
+# {
+# a => 1,
+# b => \5,
+# c => $bar,
+# d => $bar->[2]
+# }
+#];
+#$baz = $bar->[2];
+EOT
+
+ TEST q(Data::Dumper->Dump([\\*foo, \\@foo, \\%foo], ['foo', 'bar', 'baz']));
+ TEST q(Data::Dumper->Dumpxs([\\*foo, \\@foo, \\%foo], ['foo', 'bar', 'baz'])) if $XS;
+
+}
+
+#############
+#############
+{
+ package main;
+ @dogs = ( 'Fido', 'Wags' );
+ %kennel = (
+ First => \$dogs[0],
+ Second => \$dogs[1],
+ );
+ $dogs[2] = \%kennel;
+ $mutts = \%kennel;
+ $mutts = $mutts; # avoid warning
+
+############# 85
+##
+ $WANT = <<'EOT';
+#%kennels = (
+# First => \'Fido',
+# Second => \'Wags'
+#);
+#@dogs = (
+# ${$kennels{First}},
+# ${$kennels{Second}},
+# \%kennels
+#);
+#%mutts = %kennels;
+EOT
+
+ TEST q(
+ $d = Data::Dumper->new([\\%kennel, \\@dogs, $mutts],
+ [qw(*kennels *dogs *mutts)] );
+ $d->Dump;
+ );
+ if ($XS) {
+ TEST q(
+ $d = Data::Dumper->new([\\%kennel, \\@dogs, $mutts],
+ [qw(*kennels *dogs *mutts)] );
+ $d->Dumpxs;
+ );
+ }
+
+############# 91
+##
+ $WANT = <<'EOT';
+#%kennels = %kennels;
+#@dogs = @dogs;
+#%mutts = %kennels;
+EOT
+
+ TEST q($d->Dump);
+ TEST q($d->Dumpxs) if $XS;
+
+############# 97
+##
+ $WANT = <<'EOT';
+#%kennels = (
+# First => \'Fido',
+# Second => \'Wags'
+#);
+#@dogs = (
+# ${$kennels{First}},
+# ${$kennels{Second}},
+# \%kennels
+#);
+#%mutts = %kennels;
+EOT
+
+
+ TEST q($d->Reset; $d->Dump);
+ if ($XS) {
+ TEST q($d->Reset; $d->Dumpxs);
+ }
+
+############# 103
+##
+ $WANT = <<'EOT';
+#@dogs = (
+# 'Fido',
+# 'Wags',
+# {
+# First => \$dogs[0],
+# Second => \$dogs[1]
+# }
+#);
+#%kennels = %{$dogs[2]};
+#%mutts = %{$dogs[2]};
+EOT
+
+ TEST q(
+ $d = Data::Dumper->new([\\@dogs, \\%kennel, $mutts],
+ [qw(*dogs *kennels *mutts)] );
+ $d->Dump;
+ );
+ if ($XS) {
+ TEST q(
+ $d = Data::Dumper->new([\\@dogs, \\%kennel, $mutts],
+ [qw(*dogs *kennels *mutts)] );
+ $d->Dumpxs;
+ );
+ }
+
+############# 109
+##
+ TEST q($d->Reset->Dump);
+ if ($XS) {
+ TEST q($d->Reset->Dumpxs);
+ }
+
+############# 115
+##
+ $WANT = <<'EOT';
+#@dogs = (
+# 'Fido',
+# 'Wags',
+# {
+# First => \'Fido',
+# Second => \'Wags'
+# }
+#);
+#%kennels = (
+# First => \'Fido',
+# Second => \'Wags'
+#);
+EOT
+
+ TEST q(
+ $d = Data::Dumper->new( [\@dogs, \%kennel], [qw(*dogs *kennels)] );
+ $d->Deepcopy(1)->Dump;
+ );
+ if ($XS) {
+ TEST q($d->Reset->Dumpxs);
+ }
+
+}
+
+{
+
+sub z { print "foo\n" }
+$c = [ \&z ];
+
+############# 121
+##
+ $WANT = <<'EOT';
+#$a = $b;
+#$c = [
+# $b
+#];
+EOT
+
+TEST q(Data::Dumper->new([\&z,$c],['a','c'])->Seen({'b' => \&z})->Dump;);
+TEST q(Data::Dumper->new([\&z,$c],['a','c'])->Seen({'b' => \&z})->Dumpxs;)
+ if $XS;
+
+############# 127
+##
+ $WANT = <<'EOT';
+#$a = \&b;
+#$c = [
+# \&b
+#];
+EOT
+
+TEST q(Data::Dumper->new([\&z,$c],['a','c'])->Seen({'*b' => \&z})->Dump;);
+TEST q(Data::Dumper->new([\&z,$c],['a','c'])->Seen({'*b' => \&z})->Dumpxs;)
+ if $XS;
+
+############# 133
+##
+ $WANT = <<'EOT';
+#*a = \&b;
+#@c = (
+# \&b
+#);
+EOT
+
+TEST q(Data::Dumper->new([\&z,$c],['*a','*c'])->Seen({'*b' => \&z})->Dump;);
+TEST q(Data::Dumper->new([\&z,$c],['*a','*c'])->Seen({'*b' => \&z})->Dumpxs;)
+ if $XS;
+
+}
+
+{
+ $a = [];
+ $a->[1] = \$a->[0];
+
+############# 139
+##
+ $WANT = <<'EOT';
+#@a = (
+# undef,
+# ''
+#);
+#$a[1] = \$a[0];
+EOT
+
+TEST q(Data::Dumper->new([$a],['*a'])->Purity(1)->Dump;);
+TEST q(Data::Dumper->new([$a],['*a'])->Purity(1)->Dumpxs;)
+ if $XS;
+}
+
+{
+ $a = \\\\\'foo';
+ $b = $$$a;
+
+############# 145
+##
+ $WANT = <<'EOT';
+#$a = \\\\\'foo';
+#$b = ${${$a}};
+EOT
+
+TEST q(Data::Dumper->new([$a,$b],['a','b'])->Purity(1)->Dump;);
+TEST q(Data::Dumper->new([$a,$b],['a','b'])->Purity(1)->Dumpxs;)
+ if $XS;
+}
+
+{
+ $a = [{ a => \$b }, { b => undef }];
+ $b = [{ c => \$b }, { d => \$a }];
+
+############# 151
+##
+ $WANT = <<'EOT';
+#$a = [
+# {
+# a => \[
+# {
+# c => ''
+# },
+# {
+# d => \[]
+# }
+# ]
+# },
+# {
+# b => undef
+# }
+#];
+#${$a->[0]{a}}->[0]->{c} = $a->[0]{a};
+#${${$a->[0]{a}}->[1]->{d}} = $a;
+#$b = ${$a->[0]{a}};
+EOT
+
+TEST q(Data::Dumper->new([$a,$b],['a','b'])->Purity(1)->Dump;);
+TEST q(Data::Dumper->new([$a,$b],['a','b'])->Purity(1)->Dumpxs;)
+ if $XS;
+}
+
+{
+ $a = [[[[\\\\\'foo']]]];
+ $b = $a->[0][0];
+ $c = $${$b->[0][0]};
+
+############# 157
+##
+ $WANT = <<'EOT';
+#$a = [
+# [
+# [
+# [
+# \\\\\'foo'
+# ]
+# ]
+# ]
+#];
+#$b = $a->[0][0];
+#$c = ${${$a->[0][0][0][0]}};
+EOT
+
+TEST q(Data::Dumper->new([$a,$b,$c],['a','b','c'])->Purity(1)->Dump;);
+TEST q(Data::Dumper->new([$a,$b,$c],['a','b','c'])->Purity(1)->Dumpxs;)
+ if $XS;
+}
Added: trunk/orca/packages/Data-Dumper-2.101/Dumper.pm.NOXSUB
==============================================================================
--- trunk/orca/packages/Data-Dumper-2.101/Dumper.pm.NOXSUB (original)
+++ trunk/orca/packages/Data-Dumper-2.101/Dumper.pm.NOXSUB Sat Jul 13 19:22:29 2002
@@ -0,0 +1,1001 @@
+#
+# Data/Dumper.pm
+#
+# convert perl data structures into perl syntax suitable for both printing
+# and eval
+#
+# Documentation at the __END__
+#
+
+package Data::Dumper;
+
+$VERSION = $VERSION = '2.101';
+
+#$| = 1;
+
+require 5.004;
+require Exporter;
+##require DynaLoader;
+require overload;
+
+use Carp;
+
+@ISA = qw(Exporter);
+##@ISA = qw(Exporter Dynaloader);
+@EXPORT = qw(Dumper);
+##@EXPORT_OK = qw(DumperX);
+
+##bootstrap Data::Dumper;
+
+# module vars and their defaults
+$Indent = 2 unless defined $Indent;
+$Purity = 0 unless defined $Purity;
+$Pad = "" unless defined $Pad;
+$Varname = "VAR" unless defined $Varname;
+$Useqq = 0 unless defined $Useqq;
+$Terse = 0 unless defined $Terse;
+$Freezer = "" unless defined $Freezer;
+$Toaster = "" unless defined $Toaster;
+$Deepcopy = 0 unless defined $Deepcopy;
+$Quotekeys = 1 unless defined $Quotekeys;
+$Bless = "bless" unless defined $Bless;
+#$Expdepth = 0 unless defined $Expdepth;
+#$Maxdepth = 0 unless defined $Maxdepth;
+
+#
+# expects an arrayref of values to be dumped.
+# can optionally pass an arrayref of names for the values.
+# names must have leading $ sign stripped. begin the name with *
+# to cause output of arrays and hashes rather than refs.
+#
+sub new {
+ my($c, $v, $n) = @_;
+
+ croak "Usage: PACKAGE->new(ARRAYREF, [ARRAYREF])"
+ unless (defined($v) && (ref($v) eq 'ARRAY'));
+ $n = [] unless (defined($n) && (ref($v) eq 'ARRAY'));
+
+ my($s) = {
+ level => 0, # current recursive depth
+ indent => $Indent, # various styles of indenting
+ pad => $Pad, # all lines prefixed by this string
+ xpad => "", # padding-per-level
+ apad => "", # added padding for hash keys n such
+ sep => "", # list separator
+ seen => {}, # local (nested) refs (id => [name, val])
+ todump => $v, # values to dump []
+ names => $n, # optional names for values []
+ varname => $Varname, # prefix to use for tagging nameless ones
+ purity => $Purity, # degree to which output is evalable
+ useqq => $Useqq, # use "" for strings (backslashitis ensues)
+ terse => $Terse, # avoid name output (where feasible)
+ freezer => $Freezer, # name of Freezer method for objects
+ toaster => $Toaster, # name of method to revive objects
+ deepcopy => $Deepcopy, # dont cross-ref, except to stop recursion
+ quotekeys => $Quotekeys, # quote hash keys
+ 'bless' => $Bless, # keyword to use for "bless"
+# expdepth => $Expdepth, # cutoff depth for explicit dumping
+# maxdepth => $Maxdepth, # depth beyond which we give up
+ };
+
+ if ($Indent > 0) {
+ $s->{xpad} = " ";
+ $s->{sep} = "\n";
+ }
+ return bless($s, $c);
+}
+
+#
+# add-to or query the table of already seen references
+#
+sub Seen {
+ my($s, $g) = @_;
+ if (defined($g) && (ref($g) eq 'HASH')) {
+ my($k, $v, $id);
+ while (($k, $v) = each %$g) {
+ if (defined $v and ref $v) {
+ ($id) = (overload::StrVal($v) =~ /\((.*)\)$/);
+ if ($k =~ /^[*](.*)$/) {
+ $k = (ref $v eq 'ARRAY') ? ( "\\\@" . $1 ) :
+ (ref $v eq 'HASH') ? ( "\\\%" . $1 ) :
+ (ref $v eq 'CODE') ? ( "\\\&" . $1 ) :
+ ( "\$" . $1 ) ;
+ }
+ elsif ($k !~ /^\$/) {
+ $k = "\$" . $k;
+ }
+ $s->{seen}{$id} = [$k, $v];
+ }
+ else {
+ carp "Only refs supported, ignoring non-ref item \$$k";
+ }
+ }
+ return $s;
+ }
+ else {
+ return map { @$_ } values %{$s->{seen}};
+ }
+}
+
+#
+# set or query the values to be dumped
+#
+sub Values {
+ my($s, $v) = @_;
+ if (defined($v) && (ref($v) eq 'ARRAY')) {
+ $s->{todump} = [@$v]; # make a copy
+ return $s;
+ }
+ else {
+ return @{$s->{todump}};
+ }
+}
+
+#
+# set or query the names of the values to be dumped
+#
+sub Names {
+ my($s, $n) = @_;
+ if (defined($n) && (ref($n) eq 'ARRAY')) {
+ $s->{names} = [@$n]; # make a copy
+ return $s;
+ }
+ else {
+ return @{$s->{names}};
+ }
+}
+
+sub DESTROY {}
+
+#
+# dump the refs in the current dumper object.
+# expects same args as new() if called via package name.
+#
+sub Dump {
+ my($s) = shift;
+ my(@out, $val, $name);
+ my($i) = 0;
+ local(@post);
+
+ $s = $s->new(@_) unless ref $s;
+
+ for $val (@{$s->{todump}}) {
+ my $out = "";
+ @post = ();
+ $name = $s->{names}[$i++];
+ if (defined $name) {
+ if ($name =~ /^[*](.*)$/) {
+ if (defined $val) {
+ $name = (ref $val eq 'ARRAY') ? ( "\@" . $1 ) :
+ (ref $val eq 'HASH') ? ( "\%" . $1 ) :
+ (ref $val eq 'CODE') ? ( "\*" . $1 ) :
+ ( "\$" . $1 ) ;
+ }
+ else {
+ $name = "\$" . $1;
+ }
+ }
+ elsif ($name !~ /^\$/) {
+ $name = "\$" . $name;
+ }
+ }
+ else {
+ $name = "\$" . $s->{varname} . $i;
+ }
+
+ my $valstr;
+ {
+ local($s->{apad}) = $s->{apad};
+ $s->{apad} .= ' ' x (length($name) + 3) if $s->{indent} >= 2;
+ $valstr = $s->_dump($val, $name);
+ }
+
+ $valstr = "$name = " . $valstr . ';' if @post or !$s->{terse};
+ $out .= $s->{pad} . $valstr . $s->{sep};
+ $out .= $s->{pad} . join(';' . $s->{sep} . $s->{pad}, @post)
+ . ';' . $s->{sep} if @post;
+
+ push @out, $out;
+ }
+ return wantarray ? @out : join('', @out);
+}
+
+#
+# twist, toil and turn;
+# and recurse, of course.
+#
+sub _dump {
+ my($s, $val, $name) = @_;
+ my($sname);
+ my($out, $realpack, $realtype, $type, $ipad, $id, $blesspad);
+
+ $type = ref $val;
+ $out = "";
+
+ if ($type) {
+
+ # prep it, if it looks like an object
+ if ($type =~ /[a-z_:]/) {
+ my $freezer = $s->{freezer};
+ $val->$freezer() if $freezer && UNIVERSAL::can($val, $freezer);
+ }
+
+ ($realpack, $realtype, $id) =
+ (overload::StrVal($val) =~ /^(?:(.*)\=)?([^=]*)\(([^\(]*)\)$/);
+
+ # if it has a name, we need to either look it up, or keep a tab
+ # on it so we know when we hit it later
+ if (defined($name) and length($name)) {
+ # keep a tab on it so that we dont fall into recursive pit
+ if (exists $s->{seen}{$id}) {
+# if ($s->{expdepth} < $s->{level}) {
+ if ($s->{purity} and $s->{level} > 0) {
+ $out = ($realtype eq 'HASH') ? '{}' :
+ ($realtype eq 'ARRAY') ? '[]' :
+ "''" ;
+ push @post, $name . " = " . $s->{seen}{$id}[0];
+ }
+ else {
+ $out = $s->{seen}{$id}[0];
+ if ($name =~ /^([\@\%])/) {
+ my $start = $1;
+ if ($out =~ /^\\$start/) {
+ $out = substr($out, 1);
+ }
+ else {
+ $out = $start . '{' . $out . '}';
+ }
+ }
+ }
+ return $out;
+# }
+ }
+ else {
+ # store our name
+ $s->{seen}{$id} = [ (($name =~ /^[@%]/) ? ('\\' . $name ) :
+ ($realtype eq 'CODE' and
+ $name =~ /^[*](.*)$/) ? ('\\&' . $1 ) :
+ $name ),
+ $val ];
+ }
+ }
+
+ $s->{level}++;
+ $ipad = $s->{xpad} x $s->{level};
+
+ if ($realpack) { # we have a blessed ref
+ $out = $s->{'bless'} . '( ';
+ $blesspad = $s->{apad};
+ $s->{apad} .= ' ' if ($s->{indent} >= 2);
+ }
+
+ if ($realtype eq 'SCALAR') {
+ if ($realpack) {
+ $out .= 'do{\\(my $o = ' . $s->_dump($$val, "\${$name}") . ')}';
+ }
+ else {
+ $out .= '\\' . $s->_dump($$val, "\${$name}");
+ }
+ }
+ elsif ($realtype eq 'GLOB') {
+ $out .= '\\' . $s->_dump($$val, "*{$name}");
+ }
+ elsif ($realtype eq 'ARRAY') {
+ my($v, $pad, $mname);
+ my($i) = 0;
+ $out .= ($name =~ /^\@/) ? '(' : '[';
+ $pad = $s->{sep} . $s->{pad} . $s->{apad};
+ ($name =~ /^\@(.*)$/) ? ($mname = "\$" . $1) :
+ # omit -> if $foo->[0]->{bar}, but not ${$foo->[0]}->{bar}
+ ($name =~ /^\\?[\%\@\*\$][^{].*[]}]$/) ? ($mname = $name) :
+ ($mname = $name . '->');
+ $mname .= '->' if $mname =~ /^\*.+\{[A-Z]+\}$/;
+ for $v (@$val) {
+ $sname = $mname . '[' . $i . ']';
+ $out .= $pad . $ipad . '#' . $i if $s->{indent} >= 3;
+ $out .= $pad . $ipad . $s->_dump($v, $sname);
+ $out .= "," if $i++ < $#$val;
+ }
+ $out .= $pad . ($s->{xpad} x ($s->{level} - 1)) if $i;
+ $out .= ($name =~ /^\@/) ? ')' : ']';
+ }
+ elsif ($realtype eq 'HASH') {
+ my($k, $v, $pad, $lpad, $mname);
+ $out .= ($name =~ /^\%/) ? '(' : '{';
+ $pad = $s->{sep} . $s->{pad} . $s->{apad};
+ $lpad = $s->{apad};
+ ($name =~ /^\%(.*)$/) ? ($mname = "\$" . $1) :
+ # omit -> if $foo->[0]->{bar}, but not ${$foo->[0]}->{bar}
+ ($name =~ /^\\?[\%\@\*\$][^{].*[]}]$/) ? ($mname = $name) :
+ ($mname = $name . '->');
+ $mname .= '->' if $mname =~ /^\*.+\{[A-Z]+\}$/;
+ while (($k, $v) = each %$val) {
+ my $nk = $s->_dump($k, "");
+ $nk = $1 if !$s->{quotekeys} and $nk =~ /^[\"\']([A-Za-z_]\w*)[\"\']$/;
+ $sname = $mname . '{' . $nk . '}';
+ $out .= $pad . $ipad . $nk . " => ";
+
+ # temporarily alter apad
+ $s->{apad} .= (" " x (length($nk) + 4)) if $s->{indent} >= 2;
+ $out .= $s->_dump($val->{$k}, $sname) . ",";
+ $s->{apad} = $lpad if $s->{indent} >= 2;
+ }
+ if (substr($out, -1) eq ',') {
+ chop $out;
+ $out .= $pad . ($s->{xpad} x ($s->{level} - 1));
+ }
+ $out .= ($name =~ /^\%/) ? ')' : '}';
+ }
+ elsif ($realtype eq 'CODE') {
+ $out .= 'sub { "DUMMY" }';
+ carp "Encountered CODE ref, using dummy placeholder" if $s->{purity};
+ }
+ else {
+ croak "Can\'t handle $realtype type.";
+ }
+
+ if ($realpack) { # we have a blessed ref
+ $out .= ', \'' . $realpack . '\'' . ' )';
+ $out .= '->' . $s->{toaster} . '()' if $s->{toaster} ne '';
+ $s->{apad} = $blesspad;
+ }
+ $s->{level}--;
+
+ }
+ else { # simple scalar
+
+ my $ref = \$_[1];
+ # first, catalog the scalar
+ if ($name ne '') {
+ ($id) = ("$ref" =~ /\(([^\(]*)\)$/);
+ if (exists $s->{seen}{$id}) {
+ if ($s->{seen}{$id}[2]) {
+ $out = $s->{seen}{$id}[0];
+ #warn "[<$out]\n";
+ return "\${$out}";
+ }
+ }
+ else {
+ #warn "[>\\$name]\n";
+ $s->{seen}{$id} = ["\\$name", $ref];
+ }
+ }
+ if (ref($ref) eq 'GLOB' or "$ref" =~ /=GLOB\([^()]+\)$/) { # glob
+ my $name = substr($val, 1);
+ if ($name =~ /^[A-Za-z_][\w:]*$/) {
+ $name =~ s/^main::/::/;
+ $sname = $name;
+ }
+ else {
+ $sname = $s->_dump($name, "");
+ $sname = '{' . $sname . '}';
+ }
+ if ($s->{purity}) {
+ my $k;
+ local ($s->{level}) = 0;
+ for $k (qw(SCALAR ARRAY HASH)) {
+ my $gval = *$val{$k};
+ next unless defined $gval;
+ next if $k eq "SCALAR" && ! defined $$gval; # always there
+
+ # _dump can push into @post, so we hold our place using $postlen
+ my $postlen = scalar @post;
+ $post[$postlen] = "\*$sname = ";
+ local ($s->{apad}) = " " x length($post[$postlen]) if $s->{indent} >= 2;
+ $post[$postlen] .= $s->_dump($gval, "\*$sname\{$k\}");
+ }
+ }
+ $out .= '*' . $sname;
+ }
+ elsif (!defined($val)) {
+ $out .= "undef";
+ }
+ elsif ($val =~ /^-?[1-9]\d{0,8}$/) { # safe decimal number
+ $out .= $val;
+ }
+ else { # string
+ if ($s->{useqq}) {
+ $out .= qquote($val, $s->{useqq});
+ }
+ else {
+ $val =~ s/([\\\'])/\\$1/g;
+ $out .= '\'' . $val . '\'';
+ }
+ }
+ }
+ if ($id) {
+ # if we made it this far, $id was added to seen list at current
+ # level, so remove it to get deep copies
+ if ($s->{deepcopy}) {
+ delete($s->{seen}{$id});
+ }
+ elsif ($name) {
+ $s->{seen}{$id}[2] = 1;
+ }
+ }
+ return $out;
+}
+
+#
+# non-OO style of earlier version
+#
+sub Dumper {
+ return Data::Dumper->Dump([@_]);
+}
+
+#
+# same, only calls the XS version
+#
+sub DumperX {
+ croak "XSUB extensions of Data::Dumper not installed";
+}
+
+sub Dumpf { return Data::Dumper->Dump(@_) }
+
+sub Dumpp { print Data::Dumper->Dump(@_) }
+
+#
+# reset the "seen" cache
+#
+sub Reset {
+ my($s) = shift;
+ $s->{seen} = {};
+ return $s;
+}
+
+sub Indent {
+ my($s, $v) = @_;
+ if (defined($v)) {
+ if ($v == 0) {
+ $s->{xpad} = "";
+ $s->{sep} = "";
+ }
+ else {
+ $s->{xpad} = " ";
+ $s->{sep} = "\n";
+ }
+ $s->{indent} = $v;
+ return $s;
+ }
+ else {
+ return $s->{indent};
+ }
+}
+
+sub Pad {
+ my($s, $v) = @_;
+ defined($v) ? (($s->{pad} = $v), return $s) : $s->{pad};
+}
+
+sub Varname {
+ my($s, $v) = @_;
+ defined($v) ? (($s->{varname} = $v), return $s) : $s->{varname};
+}
+
+sub Purity {
+ my($s, $v) = @_;
+ defined($v) ? (($s->{purity} = $v), return $s) : $s->{purity};
+}
+
+sub Useqq {
+ my($s, $v) = @_;
+ defined($v) ? (($s->{useqq} = $v), return $s) : $s->{useqq};
+}
+
+sub Terse {
+ my($s, $v) = @_;
+ defined($v) ? (($s->{terse} = $v), return $s) : $s->{terse};
+}
+
+sub Freezer {
+ my($s, $v) = @_;
+ defined($v) ? (($s->{freezer} = $v), return $s) : $s->{freezer};
+}
+
+sub Toaster {
+ my($s, $v) = @_;
+ defined($v) ? (($s->{toaster} = $v), return $s) : $s->{toaster};
+}
+
+sub Deepcopy {
+ my($s, $v) = @_;
+ defined($v) ? (($s->{deepcopy} = $v), return $s) : $s->{deepcopy};
+}
+
+sub Quotekeys {
+ my($s, $v) = @_;
+ defined($v) ? (($s->{quotekeys} = $v), return $s) : $s->{quotekeys};
+}
+
+sub Bless {
+ my($s, $v) = @_;
+ defined($v) ? (($s->{'bless'} = $v), return $s) : $s->{'bless'};
+}
+
+# used by qquote below
+my %esc = (
+ "\a" => "\\a",
+ "\b" => "\\b",
+ "\t" => "\\t",
+ "\n" => "\\n",
+ "\f" => "\\f",
+ "\r" => "\\r",
+ "\e" => "\\e",
+);
+
+# put a string value in double quotes
+sub qquote {
+ local($_) = shift;
+ s/([\\\"\@\$])/\\$1/g;
+ return qq("$_") unless /[^\040-\176]/; # fast exit
+
+ my $high = shift || "";
+ s/([\a\b\t\n\f\r\e])/$esc{$1}/g;
+
+ # no need for 3 digits in escape for these
+ s/([\0-\037])(?!\d)/'\\'.sprintf('%o',ord($1))/eg;
+
+ s/([\0-\037\177])/'\\'.sprintf('%03o',ord($1))/eg;
+ if ($high eq "iso8859") {
+ s/([\200-\240])/'\\'.sprintf('%o',ord($1))/eg;
+ } elsif ($high eq "utf8") {
+# use utf8;
+# $str =~ s/([^\040-\176])/sprintf "\\x{%04x}", ord($1)/ge;
+ } elsif ($high eq "8bit") {
+ # leave it as it is
+ } else {
+ s/([\0-\037\177-\377])/'\\'.sprintf('%03o',ord($1))/eg;
+ }
+ return qq("$_");
+}
+
+1;
+__END__
+
+=head1 NAME
+
+Data::Dumper - stringified perl data structures, suitable for both printing and C<eval>
+
+
+=head1 SYNOPSIS
+
+ use Data::Dumper;
+
+ # simple procedural interface
+ print Dumper($foo, $bar);
+
+ # extended usage with names
+ print Data::Dumper->Dump([$foo, $bar], [qw(foo *ary)]);
+
+ # configuration variables
+ {
+ local $Data::Dumper::Purity = 1;
+ eval Data::Dumper->Dump([$foo, $bar], [qw(foo *ary)]);
+ }
+
+ # OO usage
+ $d = Data::Dumper->new([$foo, $bar], [qw(foo *ary)]);
+ ...
+ print $d->Dump;
+ ...
+ $d->Purity(1)->Terse(1)->Deepcopy(1);
+ eval $d->Dump;
+
+
+=head1 DESCRIPTION
+
+Given a list of scalars or reference variables, writes out their contents in
+perl syntax. The references can also be objects. The contents of each
+variable is output in a single Perl statement. Handles self-referential
+structures correctly.
+
+The return value can be C<eval>ed to get back an identical copy of the
+original reference structure.
+
+Any references that are the same as one of those passed in will be named
+C<$VAR>I<n> (where I<n> is a numeric suffix), and other duplicate references
+to substructures within C<$VAR>I<n> will be appropriately labeled using arrow
+notation. You can specify names for individual values to be dumped if you
+use the C<Dump()> method, or you can change the default C<$VAR> prefix to
+something else. See C<$Data::Dumper::Varname> and C<$Data::Dumper::Terse>
+below.
+
+The default output of self-referential structures can be C<eval>ed, but the
+nested references to C<$VAR>I<n> will be undefined, since a recursive
+structure cannot be constructed using one Perl statement. You should set the
+C<Purity> flag to 1 to get additional statements that will correctly fill in
+these references.
+
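For illustration (a sketch that is not part of the module's own text; the $node structure is invented), the Purity behaviour described above looks roughly like this:

    use Data::Dumper;

    my $node = { name => 'root' };
    $node->{self} = $node;                  # reference cycle

    # Default (Purity 0): the nested occurrence is emitted as $node,
    # which is still undef when the string is eval'ed back.
    print Data::Dumper->Dump([$node], ['node']);

    # Purity 1: the inner ref is emitted as '{}' plus a follow-up
    # statement such as  $node->{'self'} = $node;
    $Data::Dumper::Purity = 1;
    print Data::Dumper->Dump([$node], ['node']);
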
+In the extended usage form, the references to be dumped can be given
+user-specified names. If a name begins with a C<*>, the output will
+describe the dereferenced type of the supplied reference for hashes and
+arrays, and coderefs. Output of names will be avoided where possible if
+the C<Terse> flag is set.
+
+In many cases, methods that are used to set the internal state of the
+object will return the object itself, so method calls can be conveniently
+chained together.
+
+Several styles of output are possible, all controlled by setting
+the C<Indent> flag. See L<Configuration Variables or Methods> below
+for details.
+
+
+=head2 Methods
+
+=over 4
+
+=item I<PACKAGE>->new(I<ARRAYREF [>, I<ARRAYREF]>)
+
+Returns a newly created C<Data::Dumper> object. The first argument is an
+anonymous array of values to be dumped. The optional second argument is an
+anonymous array of names for the values. The names need not have a leading
+C<$> sign, and must be comprised of alphanumeric characters. You can begin
+a name with a C<*> to specify that the dereferenced type must be dumped
+instead of the reference itself, for ARRAY and HASH references.
+
+The prefix specified by C<$Data::Dumper::Varname> will be used with a
+numeric suffix if the name for a value is undefined.
+
+Data::Dumper will catalog all references encountered while dumping the
+values. Cross-references (in the form of names of substructures in perl
+syntax) will be inserted at all possible points, preserving any structural
+interdependencies in the original set of values. Structure traversal is
+depth-first, and proceeds in order from the first supplied value to
+the last.
+
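A quick sketch of the constructor usage just described, with invented @list/%table variables:

    use Data::Dumper;

    my @list  = (1, 2, 3);
    my %table = (colour => 'red');

    my $d = Data::Dumper->new([ \@list, \%table ], [ 'aref', '*table' ]);
    print $d->Dump;
    # first value prints as    $aref  = [ ... ];    (a reference)
    # second prints as         %table = ( ... );    (dereferenced, due to '*')
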
+=item I<$OBJ>->Dump I<or> I<PACKAGE>->Dump(I<ARRAYREF [>, I<ARRAYREF]>)
+
+Returns the stringified form of the values stored in the object (preserving
+the order in which they were supplied to C<new>), subject to the
+configuration options below. In an array context, it returns a list
+of strings corresponding to the supplied values.
+
+The second form, for convenience, simply calls the C<new> method on its
+arguments before dumping the object immediately.
+
+=item I<$OBJ>->Dumpxs I<or> I<PACKAGE>->Dumpxs(I<ARRAYREF [>, I<ARRAYREF]>)
+
+This method is available if you were able to compile and install the XSUB
+extension to C<Data::Dumper>. It is exactly identical to the C<Dump> method
+above, only about 4 to 5 times faster, since it is written entirely in C.
+
+=item I<$OBJ>->Seen(I<[HASHREF]>)
+
+Queries or adds to the internal table of already encountered references.
+You must use C<Reset> to explicitly clear the table if needed. Such
+references are not dumped; instead, their names are inserted wherever they
+are encountered subsequently. This is useful especially for properly
+dumping subroutine references.
+
+Expects an anonymous hash of name => value pairs. Same rules apply for names
+as in C<new>. If no argument is supplied, will return the "seen" list of
+name => value pairs, in an array context. Otherwise, returns the object
+itself.
+
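A small, hypothetical illustration of pre-seeding the seen table (the $config/$job names are made up):

    use Data::Dumper;

    my $config = { retries => 3 };
    my $job    = { name => 'sync', config => $config };

    my $d = Data::Dumper->new([$job], ['job']);
    $d->Seen({ config => $config });   # tell the dumper this ref already has a name
    print $d->Dump;
    # the nested hash is printed as $config instead of being expanded
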
+=item I<$OBJ>->Values(I<[ARRAYREF]>)
+
+Queries or replaces the internal array of values that will be dumped.
+When called without arguments, returns the values. Otherwise, returns the
+object itself.
+
+=item I<$OBJ>->Names(I<[ARRAYREF]>)
+
+Queries or replaces the internal array of user supplied names for the values
+that will be dumped. When called without arguments, returns the names.
+Otherwise, returns the object itself.
+
+=item I<$OBJ>->Reset
+
+Clears the internal table of "seen" references and returns the object
+itself.
+
+=back
+
+=head2 Functions
+
+=over 4
+
+=item Dumper(I<LIST>)
+
+Returns the stringified form of the values in the list, subject to the
+configuration options below. The values will be named C<$VAR>I<n> in the
+output, where I<n> is a numeric suffix. Will return a list of strings
+in an array context.
+
+=item DumperX(I<LIST>)
+
+Identical to the C<Dumper()> function above, but this calls the XSUB
+implementation. Only available if you were able to compile and install
+the XSUB extensions in C<Data::Dumper>.
+
+=back
+
+=head2 Configuration Variables or Methods
+
+Several configuration variables can be used to control the kind of output
+generated when using the procedural interface. These variables are usually
+C<local>ized in a block so that other parts of the code are not affected by
+the change.
+
+These variables determine the default state of the object created by calling
+the C<new> method, but cannot be used to alter the state of the object
+thereafter. The equivalent method names should be used instead to query
+or set the internal state of the object.
+
+The method forms return the object itself when called with arguments,
+so that they can be chained together nicely.
+
+=over 4
+
+=item $Data::Dumper::Indent I<or> I<$OBJ>->Indent(I<[NEWVAL]>)
+
+Controls the style of indentation. It can be set to 0, 1, 2 or 3. Style 0
+spews output without any newlines, indentation, or spaces between list
+items. It is the most compact format possible that can still be called
+valid perl. Style 1 outputs a readable form with newlines but no fancy
+indentation (each level in the structure is simply indented by a fixed
+amount of whitespace). Style 2 (the default) outputs a very readable form
+which takes into account the length of hash keys (so the hash value lines
+up). Style 3 is like style 2, but also annotates the elements of arrays
+with their index (but the comment is on its own line, so array output
+consumes twice the number of lines). Style 2 is the default.
+
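A throwaway sketch comparing the four indent styles on an invented structure:

    use Data::Dumper;

    my $data = { list => [ 1, 2 ] };
    for my $style (0 .. 3) {
        local $Data::Dumper::Indent = $style;
        print "--- Indent = $style ---\n", Dumper($data), "\n";
    }
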
+=item $Data::Dumper::Purity I<or> I<$OBJ>->Purity(I<[NEWVAL]>)
+
+Controls the degree to which the output can be C<eval>ed to recreate the
+supplied reference structures. Setting it to 1 will output additional perl
+statements that will correctly recreate nested references. The default is
+0.
+
+=item $Data::Dumper::Pad I<or> I<$OBJ>->Pad(I<[NEWVAL]>)
+
+Specifies the string that will be prefixed to every line of the output.
+Empty string by default.
+
+=item $Data::Dumper::Varname I<or> I<$OBJ>->Varname(I<[NEWVAL]>)
+
+Contains the prefix to use for tagging variable names in the output. The
+default is "VAR".
+
+=item $Data::Dumper::Useqq I<or> I<$OBJ>->Useqq(I<[NEWVAL]>)
+
+When set, enables the use of double quotes for representing string values.
+Whitespace other than space will be represented as C<[\n\t\r]>, "unsafe"
+characters will be backslashed, and unprintable characters will be output as
+quoted octal integers. Since setting this variable imposes a performance
+penalty, the default is 0. The C<Dumpxs()> method does not honor this
+flag yet.
+
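A brief, assumed example of Useqq with an invented string (only the pure-Perl Dump honours the flag, as noted above):

    use Data::Dumper;

    local $Data::Dumper::Useqq = 1;
    print Dumper("tab\there, bell\a");
    # prints   $VAR1 = "tab\there, bell\a";   (double quotes, readable escapes)
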
+=item $Data::Dumper::Terse I<or> I<$OBJ>->Terse(I<[NEWVAL]>)
+
+When set, Data::Dumper will emit single, non-self-referential values as
+atoms/terms rather than statements. This means that the C<$VAR>I<n> names
+will be avoided where possible, but be advised that such output may not
+always be parseable by C<eval>.
+
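One common combination, sketched here with invented data, is Terse together with Indent 0 to obtain a bare single-line term:

    use Data::Dumper;

    local $Data::Dumper::Terse  = 1;
    local $Data::Dumper::Indent = 0;
    my $str = Dumper([ 1, 2, 3 ]);
    # $str is now the bare term '[1,2,3]' -- no '$VAR1 = ' prefix and no ';'
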
+=item $Data::Dumper::Freezer I<or> $I<OBJ>->Freezer(I<[NEWVAL]>)
+
+Can be set to a method name, or to an empty string to disable the feature.
+Data::Dumper will invoke that method via the object before attempting to
+stringify it. This method can alter the contents of the object (if, for
+instance, it contains data allocated from C), and even rebless it in a
+different package. The client is responsible for making sure the specified
+method can be called via the object, and that the object ends up containing
+only perl data types after the method has been called. Defaults to an empty
+string.
+
+=item $Data::Dumper::Toaster I<or> $I<OBJ>->Toaster(I<[NEWVAL]>)
+
+Can be set to a method name, or to an empty string to disable the feature.
+Data::Dumper will emit a method call for any objects that are to be dumped
+using the syntax C<bless(DATA, CLASS)->METHOD()>. Note that this means that
+the method specified will have to perform any modifications required on the
+object (like creating new state within it, and/or reblessing it in a
+different package) and then return it. The client is responsible for making
+sure the method can be called via the object, and that it returns a valid
+object. Defaults to an empty string.
+
+=item $Data::Dumper::Deepcopy I<or> $I<OBJ>->Deepcopy(I<[NEWVAL]>)
+
+Can be set to a boolean value to enable deep copies of structures.
+Cross-referencing will then only be done when absolutely essential
+(i.e., to break reference cycles). Default is 0.
+
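A sketch of Deepcopy on an invented structure that shares one sub-reference:

    use Data::Dumper;

    my $shared = [ 1, 2 ];
    my $top    = { a => $shared, b => $shared };

    # Default: the second occurrence is a cross-reference, e.g. 'b' => $top->{'a'}
    print Data::Dumper->Dump([$top], ['top']);

    # Deepcopy: both keys get a full, independent copy of the array
    $Data::Dumper::Deepcopy = 1;
    print Data::Dumper->Dump([$top], ['top']);
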
+=item $Data::Dumper::Quotekeys I<or> $I<OBJ>->Quotekeys(I<[NEWVAL]>)
+
+Can be set to a boolean value to control whether hash keys are quoted.
+A false value will avoid quoting hash keys when it looks like a simple
+string. Default is 1, which will always enclose hash keys in quotes.
+
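A short Quotekeys sketch (keys that are not simple identifiers stay quoted either way):

    use Data::Dumper;

    local $Data::Dumper::Quotekeys = 0;
    local $Data::Dumper::Indent    = 1;
    print Dumper({ plain_key => 1, 'needs quoting' => 2 });
    # plain_key is emitted bare; the key containing a space remains quoted
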
+=item $Data::Dumper::Bless I<or> $I<OBJ>->Bless(I<[NEWVAL]>)
+
+Can be set to a string that specifies an alternative to the C<bless>
+builtin operator used to create objects. A function with the specified
+name should exist, and should accept the same arguments as the builtin.
+Default is C<bless>.
+
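A minimal sketch of substituting the bless keyword; my_bless is a hypothetical wrapper with the same signature as the builtin:

    use Data::Dumper;

    sub my_bless { my ($obj, $class) = @_; return bless $obj, $class }

    my $obj = bless {}, 'Widget';
    local $Data::Dumper::Bless = 'my_bless';
    print Dumper($obj);
    # prints   $VAR1 = my_bless( {}, 'Widget' );
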
+=back
+
+=head2 Exports
+
+=over 4
+
+=item Dumper
+
+=back
+
+=head1 EXAMPLES
+
+Run these code snippets to get a quick feel for the behavior of this
+module. When you are through with these examples, you may want to
+add or change the various configuration variables described above,
+to see their behavior. (See the testsuite in the Data::Dumper
+distribution for more examples.)
+
+
+ use Data::Dumper;
+
+ package Foo;
+ sub new {bless {'a' => 1, 'b' => sub { return "foo" }}, $_[0]};
+
+ package Fuz; # a weird REF-REF-SCALAR object
+ sub new {bless \($_ = \ 'fu\'z'), $_[0]};
+
+ package main;
+ $foo = Foo->new;
+ $fuz = Fuz->new;
+ $boo = [ 1, [], "abcd", \*foo,
+ {1 => 'a', 023 => 'b', 0x45 => 'c'},
+ \\"p\q\'r", $foo, $fuz];
+
+ ########
+ # simple usage
+ ########
+
+ $bar = eval(Dumper($boo));
+ print($@) if $@;
+ print Dumper($boo), Dumper($bar); # pretty print (no array indices)
+
+ $Data::Dumper::Terse = 1; # don't output names where feasible
+ $Data::Dumper::Indent = 0; # turn off all pretty print
+ print Dumper($boo), "\n";
+
+ $Data::Dumper::Indent = 1; # mild pretty print
+ print Dumper($boo);
+
+ $Data::Dumper::Indent = 3; # pretty print with array indices
+ print Dumper($boo);
+
+ $Data::Dumper::Useqq = 1; # print strings in double quotes
+ print Dumper($boo);
+
+
+ ########
+ # recursive structures
+ ########
+
+ @c = ('c');
+ $c = \@c;
+ $b = {};
+ $a = [1, $b, $c];
+ $b->{a} = $a;
+ $b->{b} = $a->[1];
+ $b->{c} = $a->[2];
+ print Data::Dumper->Dump([$a,$b,$c], [qw(a b c)]);
+
+
+ $Data::Dumper::Purity = 1; # fill in the holes for eval
+ print Data::Dumper->Dump([$a, $b], [qw(*a b)]); # print as @a
+ print Data::Dumper->Dump([$b, $a], [qw(*b a)]); # print as %b
+
+
+ $Data::Dumper::Deepcopy = 1; # avoid cross-refs
+ print Data::Dumper->Dump([$b, $a], [qw(*b a)]);
+
+
+ $Data::Dumper::Purity = 0; # avoid cross-refs
+ print Data::Dumper->Dump([$b, $a], [qw(*b a)]);
+
+
+ ########
+ # object-oriented usage
+ ########
+
+ $d = Data::Dumper->new([$a,$b], [qw(a b)]);
+ $d->Seen({'*c' => $c}); # stash a ref without printing it
+ $d->Indent(3);
+ print $d->Dump;
+ $d->Reset->Purity(0); # empty the seen cache
+ print join "----\n", $d->Dump;
+
+
+ ########
+ # persistence
+ ########
+
+ package Foo;
+ sub new { bless { state => 'awake' }, shift }
+ sub Freeze {
+ my $s = shift;
+ print STDERR "preparing to sleep\n";
+ $s->{state} = 'asleep';
+ return bless $s, 'Foo::ZZZ';
+ }
+
+ package Foo::ZZZ;
+ sub Thaw {
+ my $s = shift;
+ print STDERR "waking up\n";
+ $s->{state} = 'awake';
+ return bless $s, 'Foo';
+ }
+
+ package Foo;
+ use Data::Dumper;
+ $a = Foo->new;
+ $b = Data::Dumper->new([$a], ['c']);
+ $b->Freezer('Freeze');
+ $b->Toaster('Thaw');
+ $c = $b->Dump;
+ print $c;
+ $d = eval $c;
+ print Data::Dumper->Dump([$d], ['d']);
+
+
+ ########
+ # symbol substitution (useful for recreating CODE refs)
+ ########
+
+ sub foo { print "foo speaking\n" }
+ *other = \&foo;
+ $bar = [ \&other ];
+ $d = Data::Dumper->new([\&other,$bar],['*other','bar']);
+ $d->Seen({ '*foo' => \&foo });
+ print $d->Dump;
+
+
+=head1 BUGS
+
+Due to limitations of Perl subroutine call semantics, you cannot pass an
+array or hash. Prepend it with a C<\> to pass its reference instead. This
+will be remedied in time, with the arrival of prototypes in later versions
+of Perl. For now, you need to use the extended usage form, and prepend the
+name with a C<*> to output it as a hash or array.
+
+C<Data::Dumper> cheats with CODE references. If a code reference is
+encountered in the structure being processed, an anonymous subroutine that
+contains the string '"DUMMY"' will be inserted in its place, and a warning
+will be printed if C<Purity> is set. You can C<eval> the result, but bear
+in mind that the anonymous sub that gets created is just a placeholder.
+Someday, perl will have a switch to cache-on-demand the string
+representation of a compiled piece of code, I hope. If you have prior
+knowledge of all the code refs that your data structures are likely
+to have, you can use the C<Seen> method to pre-seed the internal reference
+table and make the dumped output point to them, instead. See L<EXAMPLES>
+above.
+
+The C<Useqq> flag is not honored by C<Dumpxs()> (it always outputs
+strings in single quotes).
+
+SCALAR objects have the weirdest looking C<bless> workaround.
+
+
+=head1 AUTHOR
+
+Gurusamy Sarathy gsar@umich.edu
+
+Copyright (c) 1996-98 Gurusamy Sarathy. All rights reserved.
+This program is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself.
+
+
+=head1 VERSION
+
+Version 2.101 (30 Apr 1999)
+
+=head1 SEE ALSO
+
+perl(1)
+
+=cut
Added: trunk/orca/packages/Data-Dumper-2.101/Dumper.xs
==============================================================================
--- trunk/orca/packages/Data-Dumper-2.101/Dumper.xs (original)
+++ trunk/orca/packages/Data-Dumper-2.101/Dumper.xs Sat Jul 13 19:22:30 2002
@@ -0,0 +1,864 @@
+#include "EXTERN.h"
+#include "perl.h"
+#include "XSUB.h"
+
+#ifndef PERL_VERSION
+#include "patchlevel.h"
+#define PERL_VERSION PATCHLEVEL
+#endif
+
+#if PERL_VERSION < 5
+# ifndef PL_sv_undef
+# define PL_sv_undef sv_undef
+# endif
+# ifndef ERRSV
+# define ERRSV GvSV(errgv)
+# endif
+# ifndef newSVpvn
+# define newSVpvn newSVpv
+# endif
+#endif
+
+static I32 num_q _((char *s, STRLEN slen));
+static I32 esc_q _((char *dest, char *src, STRLEN slen));
+static SV *sv_x _((SV *sv, char *str, STRLEN len, I32 n));
+static I32 DD_dump _((SV *val, char *name, STRLEN namelen, SV *retval,
+ HV *seenhv, AV *postav, I32 *levelp, I32 indent,
+ SV *pad, SV *xpad, SV *apad, SV *sep,
+ SV *freezer, SV *toaster,
+ I32 purity, I32 deepcopy, I32 quotekeys, SV *bless));
+
+/* does a string need to be protected? */
+static I32
+needs_quote(register char *s)
+{
+TOP:
+ if (s[0] == ':') {
+ if (*++s) {
+ if (*s++ != ':')
+ return 1;
+ }
+ else
+ return 1;
+ }
+ if (isIDFIRST(*s)) {
+ while (*++s)
+ if (!isALNUM(*s))
+ if (*s == ':')
+ goto TOP;
+ else
+ return 1;
+ }
+ else
+ return 1;
+ return 0;
+}
+
+/* count the number of "'"s and "\"s in string */
+static I32
+num_q(register char *s, register STRLEN slen)
+{
+ register I32 ret = 0;
+
+ while (slen > 0) {
+ if (*s == '\'' || *s == '\\')
+ ++ret;
+ ++s;
+ --slen;
+ }
+ return ret;
+}
+
+
+/* returns number of chars added to escape "'"s and "\"s in s */
+/* slen number of characters in s will be escaped */
+/* destination must be long enough for additional chars */
+static I32
+esc_q(register char *d, register char *s, register STRLEN slen)
+{
+ register I32 ret = 0;
+
+ while (slen > 0) {
+ switch (*s) {
+ case '\'':
+ case '\\':
+ *d = '\\';
+ ++d; ++ret;
+ default:
+ *d = *s;
+ ++d; ++s; --slen;
+ break;
+ }
+ }
+ return ret;
+}
+
+/* append a repeated string to an SV */
+static SV *
+sv_x(SV *sv, register char *str, STRLEN len, I32 n)
+{
+ if (sv == Nullsv)
+ sv = newSVpvn("", 0);
+ else
+ assert(SvTYPE(sv) >= SVt_PV);
+
+ if (n > 0) {
+ SvGROW(sv, len*n + SvCUR(sv) + 1);
+ if (len == 1) {
+ char *start = SvPVX(sv) + SvCUR(sv);
+ SvCUR(sv) += n;
+ start[n] = '\0';
+ while (n > 0)
+ start[--n] = str[0];
+ }
+ else
+ while (n > 0) {
+ sv_catpvn(sv, str, len);
+ --n;
+ }
+ }
+ return sv;
+}
+
+/*
+ * This ought to be split into smaller functions. (it is one long function since
+ * it exactly parallels the perl version, which was one long thing for
+ * efficiency raisins.) Ugggh!
+ */
+static I32
+DD_dump(SV *val, char *name, STRLEN namelen, SV *retval, HV *seenhv,
+ AV *postav, I32 *levelp, I32 indent, SV *pad, SV *xpad,
+ SV *apad, SV *sep, SV *freezer, SV *toaster, I32 purity,
+ I32 deepcopy, I32 quotekeys, SV *bless)
+{
+ char tmpbuf[128];
+ U32 i;
+ char *c, *r, *realpack, id[128];
+ SV **svp;
+ SV *sv, *ipad, *ival;
+ SV *blesspad = Nullsv;
+ AV *seenentry = Nullav;
+ char *iname;
+ STRLEN inamelen, idlen = 0;
+ U32 flags;
+ U32 realtype;
+
+ if (!val)
+ return 0;
+
+ flags = SvFLAGS(val);
+ realtype = SvTYPE(val);
+
+ if (SvGMAGICAL(val))
+ mg_get(val);
+ if (SvROK(val)) {
+
+ if (SvOBJECT(SvRV(val)) && freezer &&
+ SvPOK(freezer) && SvCUR(freezer))
+ {
+ dSP; ENTER; SAVETMPS; PUSHMARK(sp);
+ XPUSHs(val); PUTBACK;
+ i = perl_call_method(SvPVX(freezer), G_EVAL|G_SCALAR);
+ SPAGAIN;
+ if (SvTRUE(ERRSV))
+ warn("WARNING(Freezer method call failed): %s",
+ SvPVX(ERRSV));
+ else if (i)
+ val = newSVsv(POPs);
+ PUTBACK; FREETMPS; LEAVE;
+ if (i)
+ (void)sv_2mortal(val);
+ }
+
+ ival = SvRV(val);
+ flags = SvFLAGS(ival);
+ realtype = SvTYPE(ival);
+ (void) sprintf(id, "0x%lx", (unsigned long)ival);
+ idlen = strlen(id);
+ if (SvOBJECT(ival))
+ realpack = HvNAME(SvSTASH(ival));
+ else
+ realpack = Nullch;
+
+ /* if it has a name, we need to either look it up, or keep a tab
+ * on it so we know when we hit it later
+ */
+ if (namelen) {
+ if ((svp = hv_fetch(seenhv, id, idlen, FALSE))
+ && (sv = *svp) && SvROK(sv) && (seenentry = (AV*)SvRV(sv)))
+ {
+ SV *othername;
+ if ((svp = av_fetch(seenentry, 0, FALSE))
+ && (othername = *svp))
+ {
+ if (purity && *levelp > 0) {
+ SV *postentry;
+
+ if (realtype == SVt_PVHV)
+ sv_catpvn(retval, "{}", 2);
+ else if (realtype == SVt_PVAV)
+ sv_catpvn(retval, "[]", 2);
+ else
+ sv_catpvn(retval, "''", 2);
+ postentry = newSVpvn(name, namelen);
+ sv_catpvn(postentry, " = ", 3);
+ sv_catsv(postentry, othername);
+ av_push(postav, postentry);
+ }
+ else {
+ if (name[0] == '@' || name[0] == '%') {
+ if ((SvPVX(othername))[0] == '\\' &&
+ (SvPVX(othername))[1] == name[0]) {
+ sv_catpvn(retval, SvPVX(othername)+1,
+ SvCUR(othername)-1);
+ }
+ else {
+ sv_catpvn(retval, name, 1);
+ sv_catpvn(retval, "{", 1);
+ sv_catsv(retval, othername);
+ sv_catpvn(retval, "}", 1);
+ }
+ }
+ else
+ sv_catsv(retval, othername);
+ }
+ return 1;
+ }
+ else {
+ warn("ref name not found for %s", id);
+ return 0;
+ }
+ }
+ else { /* store our name and continue */
+ SV *namesv;
+ if (name[0] == '@' || name[0] == '%') {
+ namesv = newSVpvn("\\", 1);
+ sv_catpvn(namesv, name, namelen);
+ }
+ else if (realtype == SVt_PVCV && name[0] == '*') {
+ namesv = newSVpvn("\\", 2);
+ sv_catpvn(namesv, name, namelen);
+ (SvPVX(namesv))[1] = '&';
+ }
+ else
+ namesv = newSVpvn(name, namelen);
+ seenentry = newAV();
+ av_push(seenentry, namesv);
+ (void)SvREFCNT_inc(val);
+ av_push(seenentry, val);
+ (void)hv_store(seenhv, id, strlen(id),
+ newRV((SV*)seenentry), 0);
+ SvREFCNT_dec(seenentry);
+ }
+ }
+
+ (*levelp)++;
+ ipad = sv_x(Nullsv, SvPVX(xpad), SvCUR(xpad), *levelp);
+
+ if (realpack) { /* we have a blessed ref */
+ STRLEN blesslen;
+ char *blessstr = SvPV(bless, blesslen);
+ sv_catpvn(retval, blessstr, blesslen);
+ sv_catpvn(retval, "( ", 2);
+ if (indent >= 2) {
+ blesspad = apad;
+ apad = newSVsv(apad);
+ sv_x(apad, " ", 1, blesslen+2);
+ }
+ }
+
+ if (realtype <= SVt_PVBM) { /* scalar ref */
+ SV *namesv = newSVpvn("${", 2);
+ sv_catpvn(namesv, name, namelen);
+ sv_catpvn(namesv, "}", 1);
+ if (realpack) { /* blessed */
+ sv_catpvn(retval, "do{\\(my $o = ", 13);
+ DD_dump(ival, SvPVX(namesv), SvCUR(namesv), retval, seenhv,
+ postav, levelp, indent, pad, xpad, apad, sep,
+ freezer, toaster, purity, deepcopy, quotekeys, bless);
+ sv_catpvn(retval, ")}", 2);
+ } /* plain */
+ else {
+ sv_catpvn(retval, "\\", 1);
+ DD_dump(ival, SvPVX(namesv), SvCUR(namesv), retval, seenhv,
+ postav, levelp, indent, pad, xpad, apad, sep,
+ freezer, toaster, purity, deepcopy, quotekeys, bless);
+ }
+ SvREFCNT_dec(namesv);
+ }
+ else if (realtype == SVt_PVGV) { /* glob ref */
+ SV *namesv = newSVpvn("*{", 2);
+ sv_catpvn(namesv, name, namelen);
+ sv_catpvn(namesv, "}", 1);
+ sv_catpvn(retval, "\\", 1);
+ DD_dump(ival, SvPVX(namesv), SvCUR(namesv), retval, seenhv,
+ postav, levelp, indent, pad, xpad, apad, sep,
+ freezer, toaster, purity, deepcopy, quotekeys, bless);
+ SvREFCNT_dec(namesv);
+ }
+ else if (realtype == SVt_PVAV) {
+ SV *totpad;
+ I32 ix = 0;
+ I32 ixmax = av_len((AV *)ival);
+
+ SV *ixsv = newSViv(0);
+ /* allowing for a 24 char wide array index */
+ New(0, iname, namelen+28, char);
+ (void)strcpy(iname, name);
+ inamelen = namelen;
+ if (name[0] == '@') {
+ sv_catpvn(retval, "(", 1);
+ iname[0] = '$';
+ }
+ else {
+ sv_catpvn(retval, "[", 1);
+ /* omit "->" in $foo{bar}->[0], but not in ${$foo}->[0] */
+ /*if (namelen > 0
+ && name[namelen-1] != ']' && name[namelen-1] != '}'
+ && (namelen < 4 || (name[1] != '{' && name[2] != '{')))*/
+ if ((namelen > 0
+ && name[namelen-1] != ']' && name[namelen-1] != '}')
+ || (namelen > 4
+ && (name[1] == '{'
+ || (name[0] == '\\' && name[2] == '{'))))
+ {
+ iname[inamelen++] = '-'; iname[inamelen++] = '>';
+ iname[inamelen] = '\0';
+ }
+ }
+ if (iname[0] == '*' && iname[inamelen-1] == '}' && inamelen >= 8 &&
+ (instr(iname+inamelen-8, "{SCALAR}") ||
+ instr(iname+inamelen-7, "{ARRAY}") ||
+ instr(iname+inamelen-6, "{HASH}"))) {
+ iname[inamelen++] = '-'; iname[inamelen++] = '>';
+ }
+ iname[inamelen++] = '['; iname[inamelen] = '\0';
+ totpad = newSVsv(sep);
+ sv_catsv(totpad, pad);
+ sv_catsv(totpad, apad);
+
+ for (ix = 0; ix <= ixmax; ++ix) {
+ STRLEN ilen;
+ SV *elem;
+ svp = av_fetch((AV*)ival, ix, FALSE);
+ if (svp)
+ elem = *svp;
+ else
+ elem = &PL_sv_undef;
+
+ ilen = inamelen;
+ sv_setiv(ixsv, ix);
+ (void) sprintf(iname+ilen, "%ld", ix);
+ ilen = strlen(iname);
+ iname[ilen++] = ']'; iname[ilen] = '\0';
+ if (indent >= 3) {
+ sv_catsv(retval, totpad);
+ sv_catsv(retval, ipad);
+ sv_catpvn(retval, "#", 1);
+ sv_catsv(retval, ixsv);
+ }
+ sv_catsv(retval, totpad);
+ sv_catsv(retval, ipad);
+ DD_dump(elem, iname, ilen, retval, seenhv, postav,
+ levelp, indent, pad, xpad, apad, sep,
+ freezer, toaster, purity, deepcopy, quotekeys, bless);
+ if (ix < ixmax)
+ sv_catpvn(retval, ",", 1);
+ }
+ if (ixmax >= 0) {
+ SV *opad = sv_x(Nullsv, SvPVX(xpad), SvCUR(xpad), (*levelp)-1);
+ sv_catsv(retval, totpad);
+ sv_catsv(retval, opad);
+ SvREFCNT_dec(opad);
+ }
+ if (name[0] == '@')
+ sv_catpvn(retval, ")", 1);
+ else
+ sv_catpvn(retval, "]", 1);
+ SvREFCNT_dec(ixsv);
+ SvREFCNT_dec(totpad);
+ Safefree(iname);
+ }
+ else if (realtype == SVt_PVHV) {
+ SV *totpad, *newapad;
+ SV *iname, *sname;
+ HE *entry;
+ char *key;
+ I32 klen;
+ SV *hval;
+
+ iname = newSVpvn(name, namelen);
+ if (name[0] == '%') {
+ sv_catpvn(retval, "(", 1);
+ (SvPVX(iname))[0] = '$';
+ }
+ else {
+ sv_catpvn(retval, "{", 1);
+ /* omit "->" in $foo[0]->{bar}, but not in ${$foo}->{bar} */
+ if ((namelen > 0
+ && name[namelen-1] != ']' && name[namelen-1] != '}')
+ || (namelen > 4
+ && (name[1] == '{'
+ || (name[0] == '\\' && name[2] == '{'))))
+ {
+ sv_catpvn(iname, "->", 2);
+ }
+ }
+ if (name[0] == '*' && name[namelen-1] == '}' && namelen >= 8 &&
+ (instr(name+namelen-8, "{SCALAR}") ||
+ instr(name+namelen-7, "{ARRAY}") ||
+ instr(name+namelen-6, "{HASH}"))) {
+ sv_catpvn(iname, "->", 2);
+ }
+ sv_catpvn(iname, "{", 1);
+ totpad = newSVsv(sep);
+ sv_catsv(totpad, pad);
+ sv_catsv(totpad, apad);
+
+ (void)hv_iterinit((HV*)ival);
+ i = 0;
+ while ((entry = hv_iternext((HV*)ival))) {
+ char *nkey;
+ I32 nticks = 0;
+
+ if (i)
+ sv_catpvn(retval, ",", 1);
+ i++;
+ key = hv_iterkey(entry, &klen);
+ hval = hv_iterval((HV*)ival, entry);
+
+ if (quotekeys || needs_quote(key)) {
+ nticks = num_q(key, klen);
+ New(0, nkey, klen+nticks+3, char);
+ nkey[0] = '\'';
+ if (nticks)
+ klen += esc_q(nkey+1, key, klen);
+ else
+ (void)Copy(key, nkey+1, klen, char);
+ nkey[++klen] = '\'';
+ nkey[++klen] = '\0';
+ }
+ else {
+ New(0, nkey, klen, char);
+ (void)Copy(key, nkey, klen, char);
+ }
+
+ sname = newSVsv(iname);
+ sv_catpvn(sname, nkey, klen);
+ sv_catpvn(sname, "}", 1);
+
+ sv_catsv(retval, totpad);
+ sv_catsv(retval, ipad);
+ sv_catpvn(retval, nkey, klen);
+ sv_catpvn(retval, " => ", 4);
+ if (indent >= 2) {
+ char *extra;
+ I32 elen = 0;
+ newapad = newSVsv(apad);
+ New(0, extra, klen+4+1, char);
+ while (elen < (klen+4))
+ extra[elen++] = ' ';
+ extra[elen] = '\0';
+ sv_catpvn(newapad, extra, elen);
+ Safefree(extra);
+ }
+ else
+ newapad = apad;
+
+ DD_dump(hval, SvPVX(sname), SvCUR(sname), retval, seenhv,
+ postav, levelp, indent, pad, xpad, newapad, sep,
+ freezer, toaster, purity, deepcopy, quotekeys, bless);
+ SvREFCNT_dec(sname);
+ Safefree(nkey);
+ if (indent >= 2)
+ SvREFCNT_dec(newapad);
+ }
+ if (i) {
+ SV *opad = sv_x(Nullsv, SvPVX(xpad), SvCUR(xpad), *levelp-1);
+ sv_catsv(retval, totpad);
+ sv_catsv(retval, opad);
+ SvREFCNT_dec(opad);
+ }
+ if (name[0] == '%')
+ sv_catpvn(retval, ")", 1);
+ else
+ sv_catpvn(retval, "}", 1);
+ SvREFCNT_dec(iname);
+ SvREFCNT_dec(totpad);
+ }
+ else if (realtype == SVt_PVCV) {
+ sv_catpvn(retval, "sub { \"DUMMY\" }", 15);
+ if (purity)
+ warn("Encountered CODE ref, using dummy placeholder");
+ }
+ else {
+ warn("cannot handle ref type %ld", realtype);
+ }
+
+ if (realpack) { /* free blessed allocs */
+ if (indent >= 2) {
+ SvREFCNT_dec(apad);
+ apad = blesspad;
+ }
+ sv_catpvn(retval, ", '", 3);
+ sv_catpvn(retval, realpack, strlen(realpack));
+ sv_catpvn(retval, "' )", 3);
+ if (toaster && SvPOK(toaster) && SvCUR(toaster)) {
+ sv_catpvn(retval, "->", 2);
+ sv_catsv(retval, toaster);
+ sv_catpvn(retval, "()", 2);
+ }
+ }
+ SvREFCNT_dec(ipad);
+ (*levelp)--;
+ }
+ else {
+ STRLEN i;
+
+ if (namelen) {
+ (void) sprintf(id, "0x%lx", (unsigned long)val);
+ if ((svp = hv_fetch(seenhv, id, (idlen = strlen(id)), FALSE)) &&
+ (sv = *svp) && SvROK(sv) &&
+ (seenentry = (AV*)SvRV(sv)))
+ {
+ SV *othername;
+ if ((svp = av_fetch(seenentry, 0, FALSE)) && (othername = *svp)
+ && (svp = av_fetch(seenentry, 2, FALSE)) && *svp && SvIV(*svp) > 0)
+ {
+ sv_catpvn(retval, "${", 2);
+ sv_catsv(retval, othername);
+ sv_catpvn(retval, "}", 1);
+ return 1;
+ }
+ }
+ else {
+ SV *namesv;
+ namesv = newSVpvn("\\", 1);
+ sv_catpvn(namesv, name, namelen);
+ seenentry = newAV();
+ av_push(seenentry, namesv);
+ av_push(seenentry, newRV(val));
+ (void)hv_store(seenhv, id, strlen(id), newRV((SV*)seenentry), 0);
+ SvREFCNT_dec(seenentry);
+ }
+ }
+
+ if (SvIOK(val)) {
+ STRLEN len;
+ i = SvIV(val);
+ (void) sprintf(tmpbuf, "%d", i);
+ len = strlen(tmpbuf);
+ sv_catpvn(retval, tmpbuf, len);
+ }
+ else if (realtype == SVt_PVGV) {/* GLOBs can end up with scribbly names */
+ c = SvPV(val, i);
+ ++c; --i; /* just get the name */
+ if (i >= 6 && strncmp(c, "main::", 6) == 0) {
+ c += 4;
+ i -= 4;
+ }
+ if (needs_quote(c)) {
+ sv_grow(retval, SvCUR(retval)+6+2*i);
+ r = SvPVX(retval)+SvCUR(retval);
+ r[0] = '*'; r[1] = '{'; r[2] = '\'';
+ i += esc_q(r+3, c, i);
+ i += 3;
+ r[i++] = '\''; r[i++] = '}';
+ r[i] = '\0';
+ }
+ else {
+ sv_grow(retval, SvCUR(retval)+i+2);
+ r = SvPVX(retval)+SvCUR(retval);
+ r[0] = '*'; strcpy(r+1, c);
+ i++;
+ }
+ SvCUR_set(retval, SvCUR(retval)+i);
+
+ if (purity) {
+ static char *entries[] = { "{SCALAR}", "{ARRAY}", "{HASH}" };
+ static STRLEN sizes[] = { 8, 7, 6 };
+ SV *e;
+ SV *nname = newSVpvn("", 0);
+ SV *newapad = newSVpvn("", 0);
+ GV *gv = (GV*)val;
+ I32 j;
+
+ for (j=0; j<3; j++) {
+ e = ((j == 0) ? GvSV(gv) : (j == 1) ? (SV*)GvAV(gv) : (SV*)GvHV(gv));
+ if (!e)
+ continue;
+ if (j == 0 && !SvOK(e))
+ continue;
+
+ {
+ I32 nlevel = 0;
+ SV *postentry = newSVpvn(r,i);
+
+ sv_setsv(nname, postentry);
+ sv_catpvn(nname, entries[j], sizes[j]);
+ sv_catpvn(postentry, " = ", 3);
+ av_push(postav, postentry);
+ e = newRV(e);
+
+ SvCUR(newapad) = 0;
+ if (indent >= 2)
+ (void)sv_x(newapad, " ", 1, SvCUR(postentry));
+
+ DD_dump(e, SvPVX(nname), SvCUR(nname), postentry,
+ seenhv, postav, &nlevel, indent, pad, xpad,
+ newapad, sep, freezer, toaster, purity,
+ deepcopy, quotekeys, bless);
+ SvREFCNT_dec(e);
+ }
+ }
+
+ SvREFCNT_dec(newapad);
+ SvREFCNT_dec(nname);
+ }
+ }
+ else if (val == &PL_sv_undef || !SvOK(val)) {
+ sv_catpvn(retval, "undef", 5);
+ }
+ else {
+ c = SvPV(val, i);
+ sv_grow(retval, SvCUR(retval)+3+2*i);
+ r = SvPVX(retval)+SvCUR(retval);
+ r[0] = '\'';
+ i += esc_q(r+1, c, i);
+ ++i;
+ r[i++] = '\'';
+ r[i] = '\0';
+ SvCUR_set(retval, SvCUR(retval)+i);
+ }
+ }
+
+ if (idlen) {
+ if (deepcopy)
+ (void)hv_delete(seenhv, id, idlen, G_DISCARD);
+ else if (namelen && seenentry) {
+ SV *mark = *av_fetch(seenentry, 2, TRUE);
+ sv_setiv(mark,1);
+ }
+ }
+ return 1;
+}
+
+
+MODULE = Data::Dumper PACKAGE = Data::Dumper PREFIX = Data_Dumper_
+
+#
+# This is the exact equivalent of Dump. Well, almost. The things that are
+# different as of now (due to Laziness):
+# * doesnt do double-quotes yet.
+#
+
+void
+Data_Dumper_Dumpxs(href, ...)
+ SV *href;
+ PROTOTYPE: $;$$
+ PPCODE:
+ {
+ HV *hv;
+ SV *retval, *valstr;
+ HV *seenhv = Nullhv;
+ AV *postav, *todumpav, *namesav;
+ I32 level = 0;
+ I32 indent, terse, useqq, i, imax, postlen;
+ SV **svp;
+ SV *val, *name, *pad, *xpad, *apad, *sep, *tmp, *varname;
+ SV *freezer, *toaster, *bless;
+ I32 purity, deepcopy, quotekeys;
+ char tmpbuf[1024];
+ I32 gimme = GIMME;
+
+ if (!SvROK(href)) { /* call new to get an object first */
+ SV *valarray;
+ SV *namearray;
+
+ if (items == 3) {
+ valarray = ST(1);
+ namearray = ST(2);
+ }
+ else
+ croak("Usage: Data::Dumper::Dumpxs(PACKAGE, VAL_ARY_REF, NAME_ARY_REF)");
+
+ ENTER;
+ SAVETMPS;
+
+ PUSHMARK(sp);
+ XPUSHs(href);
+ XPUSHs(sv_2mortal(newSVsv(valarray)));
+ XPUSHs(sv_2mortal(newSVsv(namearray)));
+ PUTBACK;
+ i = perl_call_method("new", G_SCALAR);
+ SPAGAIN;
+ if (i)
+ href = newSVsv(POPs);
+
+ PUTBACK;
+ FREETMPS;
+ LEAVE;
+ if (i)
+ (void)sv_2mortal(href);
+ }
+
+ todumpav = namesav = Nullav;
+ seenhv = Nullhv;
+ val = pad = xpad = apad = sep = tmp = varname
+ = freezer = toaster = bless = &PL_sv_undef;
+ name = sv_newmortal();
+ indent = 2;
+ terse = useqq = purity = deepcopy = 0;
+ quotekeys = 1;
+
+ retval = newSVpvn("", 0);
+ if (SvROK(href)
+ && (hv = (HV*)SvRV((SV*)href))
+ && SvTYPE(hv) == SVt_PVHV) {
+
+ if ((svp = hv_fetch(hv, "seen", 4, FALSE)) && SvROK(*svp))
+ seenhv = (HV*)SvRV(*svp);
+ if ((svp = hv_fetch(hv, "todump", 6, FALSE)) && SvROK(*svp))
+ todumpav = (AV*)SvRV(*svp);
+ if ((svp = hv_fetch(hv, "names", 5, FALSE)) && SvROK(*svp))
+ namesav = (AV*)SvRV(*svp);
+ if ((svp = hv_fetch(hv, "indent", 6, FALSE)))
+ indent = SvIV(*svp);
+ if ((svp = hv_fetch(hv, "purity", 6, FALSE)))
+ purity = SvIV(*svp);
+ if ((svp = hv_fetch(hv, "terse", 5, FALSE)))
+ terse = SvTRUE(*svp);
+ if ((svp = hv_fetch(hv, "useqq", 5, FALSE)))
+ useqq = SvTRUE(*svp);
+ if ((svp = hv_fetch(hv, "pad", 3, FALSE)))
+ pad = *svp;
+ if ((svp = hv_fetch(hv, "xpad", 4, FALSE)))
+ xpad = *svp;
+ if ((svp = hv_fetch(hv, "apad", 4, FALSE)))
+ apad = *svp;
+ if ((svp = hv_fetch(hv, "sep", 3, FALSE)))
+ sep = *svp;
+ if ((svp = hv_fetch(hv, "varname", 7, FALSE)))
+ varname = *svp;
+ if ((svp = hv_fetch(hv, "freezer", 7, FALSE)))
+ freezer = *svp;
+ if ((svp = hv_fetch(hv, "toaster", 7, FALSE)))
+ toaster = *svp;
+ if ((svp = hv_fetch(hv, "deepcopy", 8, FALSE)))
+ deepcopy = SvTRUE(*svp);
+ if ((svp = hv_fetch(hv, "quotekeys", 9, FALSE)))
+ quotekeys = SvTRUE(*svp);
+ if ((svp = hv_fetch(hv, "bless", 5, FALSE)))
+ bless = *svp;
+ postav = newAV();
+
+ if (todumpav)
+ imax = av_len(todumpav);
+ else
+ imax = -1;
+ valstr = newSVpvn("",0);
+ for (i = 0; i <= imax; ++i) {
+ SV *newapad;
+
+ av_clear(postav);
+ if ((svp = av_fetch(todumpav, i, FALSE)))
+ val = *svp;
+ else
+ val = &PL_sv_undef;
+ if ((svp = av_fetch(namesav, i, TRUE)))
+ sv_setsv(name, *svp);
+ else
+ SvOK_off(name);
+
+ if (SvOK(name)) {
+ if ((SvPVX(name))[0] == '*') {
+ if (SvROK(val)) {
+ switch (SvTYPE(SvRV(val))) {
+ case SVt_PVAV:
+ (SvPVX(name))[0] = '@';
+ break;
+ case SVt_PVHV:
+ (SvPVX(name))[0] = '%';
+ break;
+ case SVt_PVCV:
+ (SvPVX(name))[0] = '*';
+ break;
+ default:
+ (SvPVX(name))[0] = '$';
+ break;
+ }
+ }
+ else
+ (SvPVX(name))[0] = '$';
+ }
+ else if ((SvPVX(name))[0] != '$')
+ sv_insert(name, 0, 0, "$", 1);
+ }
+ else {
+ STRLEN nchars = 0;
+ sv_setpvn(name, "$", 1);
+ sv_catsv(name, varname);
+ (void) sprintf(tmpbuf, "%ld", i+1);
+ nchars = strlen(tmpbuf);
+ sv_catpvn(name, tmpbuf, nchars);
+ }
+
+ if (indent >= 2) {
+ SV *tmpsv = sv_x(Nullsv, " ", 1, SvCUR(name)+3);
+ newapad = newSVsv(apad);
+ sv_catsv(newapad, tmpsv);
+ SvREFCNT_dec(tmpsv);
+ }
+ else
+ newapad = apad;
+
+ DD_dump(val, SvPVX(name), SvCUR(name), valstr, seenhv,
+ postav, &level, indent, pad, xpad, newapad, sep,
+ freezer, toaster, purity, deepcopy, quotekeys,
+ bless);
+
+ if (indent >= 2)
+ SvREFCNT_dec(newapad);
+
+ postlen = av_len(postav);
+ if (postlen >= 0 || !terse) {
+ sv_insert(valstr, 0, 0, " = ", 3);
+ sv_insert(valstr, 0, 0, SvPVX(name), SvCUR(name));
+ sv_catpvn(valstr, ";", 1);
+ }
+ sv_catsv(retval, pad);
+ sv_catsv(retval, valstr);
+ sv_catsv(retval, sep);
+ if (postlen >= 0) {
+ I32 i;
+ sv_catsv(retval, pad);
+ for (i = 0; i <= postlen; ++i) {
+ SV *elem;
+ svp = av_fetch(postav, i, FALSE);
+ if (svp && (elem = *svp)) {
+ sv_catsv(retval, elem);
+ if (i < postlen) {
+ sv_catpvn(retval, ";", 1);
+ sv_catsv(retval, sep);
+ sv_catsv(retval, pad);
+ }
+ }
+ }
+ sv_catpvn(retval, ";", 1);
+ sv_catsv(retval, sep);
+ }
+ sv_setpvn(valstr, "", 0);
+ if (gimme == G_ARRAY) {
+ XPUSHs(sv_2mortal(retval));
+ if (i < imax) /* not the last time thro ? */
+ retval = newSVpvn("",0);
+ }
+ }
+ SvREFCNT_dec(postav);
+ SvREFCNT_dec(valstr);
+ }
+ else
+ croak("Call to new() method failed to return HASH ref");
+ if (gimme == G_SCALAR)
+ XPUSHs(sv_2mortal(retval));
+ }
Added: trunk/orca/packages/Data-Dumper-2.101/Todo
==============================================================================
--- trunk/orca/packages/Data-Dumper-2.101/Todo (original)
+++ trunk/orca/packages/Data-Dumper-2.101/Todo Sat Jul 13 19:22:30 2002
@@ -0,0 +1,34 @@
+=head1 NAME
+
+TODO - seeds germane, yet not germinated
+
+=head1 DESCRIPTION
+
+The following functionality will be supported in the next few releases.
+
+=over 4
+
+=item $Data::Dumper::Maxdepth I<or> $I<OBJ>->Maxdepth(I<NEWVAL>)
+
+Depth beyond which we don't venture into a structure. Has no effect when
+C<Data::Dumper::Purity> is set. (useful in debugger when we often don't
+want to see more than enough).
+
+=item $Data::Dumper::Expdepth I<or> $I<OBJ>->Expdepth(I<NEWVAL>)
+
+Dump contents explicitly up to a certain depth and then use names for
+cross-referencing identical references. (useful in debugger, in situations
+where we don't care so much for cross-references).
+
+=item Make C<Dumpxs()> honor C<$Useqq>
+
+=item Fix formatting when Terse is set and Indent >= 2
+
+=item Output space after '\' (ref constructor) for high enough Indent
+
+=item Implement redesign that allows various backends (Perl, Lisp,
+some-binary-data-format, graph-description-languages, etc.)
+
+=item Dump traversal in breadth-first order
+
+=back
Added: trunk/orca/packages/Data-Dumper-2.101/MANIFEST
==============================================================================
--- trunk/orca/packages/Data-Dumper-2.101/MANIFEST (original)
+++ trunk/orca/packages/Data-Dumper-2.101/MANIFEST Sat Jul 13 19:22:30 2002
@@ -0,0 +1,12 @@
+README
+MANIFEST
+MANIFEST.NOXSUB
+Changes
+Todo
+Makefile.PL
+Dumper.pm
+Dumper.pm.NOXSUB
+Dumper.xs
+Dumper.html
+t/dumper.t
+t/overload.t
Added: trunk/orca/packages/Data-Dumper-2.101/Makefile.PL
==============================================================================
--- trunk/orca/packages/Data-Dumper-2.101/Makefile.PL (original)
+++ trunk/orca/packages/Data-Dumper-2.101/Makefile.PL Sat Jul 13 19:22:30 2002
@@ -0,0 +1,45 @@
+use ExtUtils::MakeMaker;
+use File::Copy qw();
+my $arg = $ARGV[0] || "";
+my($DOXS, $DONOXS);
+$DONOXS = shift if $arg =~ /^noxs/i and
+ -f "MANIFEST.NOXSUB" and
+ -f "Dumper.pm.NOXSUB";
+$DOXS = shift if $arg =~ /^xs/i and
+ -f "MANIFEST.XSUB" and
+ -f "Dumper.xs.XSUB" and
+ -f "Dumper.pm.XSUB";
+
+if ($DONOXS) {
+ print STDERR "Disabling XS in sources...\n";
+ die "***** Failed, sources could be inconsistent! *****\n" unless
+ File::Copy::move('MANIFEST', 'MANIFEST.XSUB') and
+ File::Copy::move('MANIFEST.NOXSUB', 'MANIFEST') and
+ File::Copy::move('Dumper.pm', 'Dumper.pm.XSUB') and
+ File::Copy::move('Dumper.xs', 'Dumper.xs.XSUB') and
+ File::Copy::move('Dumper.pm.NOXSUB','Dumper.pm');
+}
+elsif ($DOXS) {
+ print STDERR "Enabling XS in sources...\n";
+ die "***** Failed, sources could be inconsistent! *****\n" unless
+ File::Copy::move('MANIFEST', 'MANIFEST.NOXSUB') and
+ File::Copy::move('MANIFEST.XSUB', 'MANIFEST') and
+ File::Copy::move('Dumper.pm', 'Dumper.pm.NOXSUB') and
+ File::Copy::move('Dumper.xs.XSUB', 'Dumper.xs') and
+ File::Copy::move('Dumper.pm.XSUB', 'Dumper.pm');
+}
+
+WriteMakefile(
+ NAME => "Data::Dumper",
+ VERSION_FROM => 'Dumper.pm',
+ DISTNAME => 'Data-Dumper',
+ (
+ $] > 5.00470 ?
+ (INSTALLDIRS => 'perl') :
+ ()
+ ),
+ 'dist' => {
+ COMPRESS => 'gzip -9f',
+ SUFFIX => 'gz',
+ },
+);
Added: trunk/orca/packages/Data-Dumper-2.101/Dumper.pm
==============================================================================
--- trunk/orca/packages/Data-Dumper-2.101/Dumper.pm (original)
+++ trunk/orca/packages/Data-Dumper-2.101/Dumper.pm Sat Jul 13 19:22:30 2002
@@ -0,0 +1,1000 @@
+#
+# Data/Dumper.pm
+#
+# convert perl data structures into perl syntax suitable for both printing
+# and eval
+#
+# Documentation at the __END__
+#
+
+package Data::Dumper;
+
+$VERSION = $VERSION = '2.101';
+
+#$| = 1;
+
+require 5.004;
+require Exporter;
+require DynaLoader;
+require overload;
+
+use Carp;
+
+@ISA = qw(Exporter DynaLoader);
+@EXPORT = qw(Dumper);
+@EXPORT_OK = qw(DumperX);
+
+bootstrap Data::Dumper;
+
+# module vars and their defaults
+$Indent = 2 unless defined $Indent;
+$Purity = 0 unless defined $Purity;
+$Pad = "" unless defined $Pad;
+$Varname = "VAR" unless defined $Varname;
+$Useqq = 0 unless defined $Useqq;
+$Terse = 0 unless defined $Terse;
+$Freezer = "" unless defined $Freezer;
+$Toaster = "" unless defined $Toaster;
+$Deepcopy = 0 unless defined $Deepcopy;
+$Quotekeys = 1 unless defined $Quotekeys;
+$Bless = "bless" unless defined $Bless;
+#$Expdepth = 0 unless defined $Expdepth;
+#$Maxdepth = 0 unless defined $Maxdepth;
+
+#
+# expects an arrayref of values to be dumped.
+# can optionally pass an arrayref of names for the values.
+# names must have leading $ sign stripped. begin the name with *
+# to cause output of arrays and hashes rather than refs.
+#
+sub new {
+ my($c, $v, $n) = @_;
+
+ croak "Usage: PACKAGE->new(ARRAYREF, [ARRAYREF])"
+ unless (defined($v) && (ref($v) eq 'ARRAY'));
+ $n = [] unless (defined($n) && (ref($v) eq 'ARRAY'));
+
+ my($s) = {
+ level => 0, # current recursive depth
+ indent => $Indent, # various styles of indenting
+ pad => $Pad, # all lines prefixed by this string
+ xpad => "", # padding-per-level
+ apad => "", # added padding for hash keys n such
+ sep => "", # list separator
+ seen => {}, # local (nested) refs (id => [name, val])
+ todump => $v, # values to dump []
+ names => $n, # optional names for values []
+ varname => $Varname, # prefix to use for tagging nameless ones
+ purity => $Purity, # degree to which output is evalable
+ useqq => $Useqq, # use "" for strings (backslashitis ensues)
+ terse => $Terse, # avoid name output (where feasible)
+ freezer => $Freezer, # name of Freezer method for objects
+ toaster => $Toaster, # name of method to revive objects
+ deepcopy => $Deepcopy, # dont cross-ref, except to stop recursion
+ quotekeys => $Quotekeys, # quote hash keys
+ 'bless' => $Bless, # keyword to use for "bless"
+# expdepth => $Expdepth, # cutoff depth for explicit dumping
+# maxdepth => $Maxdepth, # depth beyond which we give up
+ };
+
+ if ($Indent > 0) {
+ $s->{xpad} = " ";
+ $s->{sep} = "\n";
+ }
+ return bless($s, $c);
+}
+
+#
+# add-to or query the table of already seen references
+#
+sub Seen {
+ my($s, $g) = @_;
+ if (defined($g) && (ref($g) eq 'HASH')) {
+ my($k, $v, $id);
+ while (($k, $v) = each %$g) {
+ if (defined $v and ref $v) {
+ ($id) = (overload::StrVal($v) =~ /\((.*)\)$/);
+ if ($k =~ /^[*](.*)$/) {
+ $k = (ref $v eq 'ARRAY') ? ( "\\\@" . $1 ) :
+ (ref $v eq 'HASH') ? ( "\\\%" . $1 ) :
+ (ref $v eq 'CODE') ? ( "\\\&" . $1 ) :
+ ( "\$" . $1 ) ;
+ }
+ elsif ($k !~ /^\$/) {
+ $k = "\$" . $k;
+ }
+ $s->{seen}{$id} = [$k, $v];
+ }
+ else {
+ carp "Only refs supported, ignoring non-ref item \$$k";
+ }
+ }
+ return $s;
+ }
+ else {
+ return map { @$_ } values %{$s->{seen}};
+ }
+}
+
+#
+# set or query the values to be dumped
+#
+sub Values {
+ my($s, $v) = @_;
+ if (defined($v) && (ref($v) eq 'ARRAY')) {
+ $s->{todump} = [@$v]; # make a copy
+ return $s;
+ }
+ else {
+ return @{$s->{todump}};
+ }
+}
+
+#
+# set or query the names of the values to be dumped
+#
+sub Names {
+ my($s, $n) = @_;
+ if (defined($n) && (ref($n) eq 'ARRAY')) {
+ $s->{names} = [@$n]; # make a copy
+ return $s;
+ }
+ else {
+ return @{$s->{names}};
+ }
+}
+
+sub DESTROY {}
+
+#
+# dump the refs in the current dumper object.
+# expects same args as new() if called via package name.
+#
+sub Dump {
+ my($s) = shift;
+ my(@out, $val, $name);
+ my($i) = 0;
+ local(@post);
+
+ $s = $s->new(@_) unless ref $s;
+
+ for $val (@{$s->{todump}}) {
+ my $out = "";
+ @post = ();
+ $name = $s->{names}[$i++];
+ if (defined $name) {
+ if ($name =~ /^[*](.*)$/) {
+ if (defined $val) {
+ $name = (ref $val eq 'ARRAY') ? ( "\@" . $1 ) :
+ (ref $val eq 'HASH') ? ( "\%" . $1 ) :
+ (ref $val eq 'CODE') ? ( "\*" . $1 ) :
+ ( "\$" . $1 ) ;
+ }
+ else {
+ $name = "\$" . $1;
+ }
+ }
+ elsif ($name !~ /^\$/) {
+ $name = "\$" . $name;
+ }
+ }
+ else {
+ $name = "\$" . $s->{varname} . $i;
+ }
+
+ my $valstr;
+ {
+ local($s->{apad}) = $s->{apad};
+ $s->{apad} .= ' ' x (length($name) + 3) if $s->{indent} >= 2;
+ $valstr = $s->_dump($val, $name);
+ }
+
+ $valstr = "$name = " . $valstr . ';' if @post or !$s->{terse};
+ $out .= $s->{pad} . $valstr . $s->{sep};
+ $out .= $s->{pad} . join(';' . $s->{sep} . $s->{pad}, @post)
+ . ';' . $s->{sep} if @post;
+
+ push @out, $out;
+ }
+ return wantarray ? @out : join('', @out);
+}
+
+#
+# twist, toil and turn;
+# and recurse, of course.
+#
+sub _dump {
+ my($s, $val, $name) = @_;
+ my($sname);
+ my($out, $realpack, $realtype, $type, $ipad, $id, $blesspad);
+
+ $type = ref $val;
+ $out = "";
+
+ if ($type) {
+
+ # prep it, if it looks like an object
+ if ($type =~ /[a-z_:]/) {
+ my $freezer = $s->{freezer};
+ $val->$freezer() if $freezer && UNIVERSAL::can($val, $freezer);
+ }
+
+ ($realpack, $realtype, $id) =
+ (overload::StrVal($val) =~ /^(?:(.*)\=)?([^=]*)\(([^\(]*)\)$/);
+
+ # if it has a name, we need to either look it up, or keep a tab
+ # on it so we know when we hit it later
+ if (defined($name) and length($name)) {
+ # keep a tab on it so that we dont fall into recursive pit
+ if (exists $s->{seen}{$id}) {
+# if ($s->{expdepth} < $s->{level}) {
+ if ($s->{purity} and $s->{level} > 0) {
+ $out = ($realtype eq 'HASH') ? '{}' :
+ ($realtype eq 'ARRAY') ? '[]' :
+ "''" ;
+ push @post, $name . " = " . $s->{seen}{$id}[0];
+ }
+ else {
+ $out = $s->{seen}{$id}[0];
+ if ($name =~ /^([\@\%])/) {
+ my $start = $1;
+ if ($out =~ /^\\$start/) {
+ $out = substr($out, 1);
+ }
+ else {
+ $out = $start . '{' . $out . '}';
+ }
+ }
+ }
+ return $out;
+# }
+ }
+ else {
+ # store our name
+ $s->{seen}{$id} = [ (($name =~ /^[@%]/) ? ('\\' . $name ) :
+ ($realtype eq 'CODE' and
+ $name =~ /^[*](.*)$/) ? ('\\&' . $1 ) :
+ $name ),
+ $val ];
+ }
+ }
+
+ $s->{level}++;
+ $ipad = $s->{xpad} x $s->{level};
+
+ if ($realpack) { # we have a blessed ref
+ $out = $s->{'bless'} . '( ';
+ $blesspad = $s->{apad};
+ $s->{apad} .= ' ' if ($s->{indent} >= 2);
+ }
+
+ if ($realtype eq 'SCALAR') {
+ if ($realpack) {
+ $out .= 'do{\\(my $o = ' . $s->_dump($$val, "\${$name}") . ')}';
+ }
+ else {
+ $out .= '\\' . $s->_dump($$val, "\${$name}");
+ }
+ }
+ elsif ($realtype eq 'GLOB') {
+ $out .= '\\' . $s->_dump($$val, "*{$name}");
+ }
+ elsif ($realtype eq 'ARRAY') {
+ my($v, $pad, $mname);
+ my($i) = 0;
+ $out .= ($name =~ /^\@/) ? '(' : '[';
+ $pad = $s->{sep} . $s->{pad} . $s->{apad};
+ ($name =~ /^\@(.*)$/) ? ($mname = "\$" . $1) :
+ # omit -> if $foo->[0]->{bar}, but not ${$foo->[0]}->{bar}
+ ($name =~ /^\\?[\%\@\*\$][^{].*[]}]$/) ? ($mname = $name) :
+ ($mname = $name . '->');
+ $mname .= '->' if $mname =~ /^\*.+\{[A-Z]+\}$/;
+ for $v (@$val) {
+ $sname = $mname . '[' . $i . ']';
+ $out .= $pad . $ipad . '#' . $i if $s->{indent} >= 3;
+ $out .= $pad . $ipad . $s->_dump($v, $sname);
+ $out .= "," if $i++ < $#$val;
+ }
+ $out .= $pad . ($s->{xpad} x ($s->{level} - 1)) if $i;
+ $out .= ($name =~ /^\@/) ? ')' : ']';
+ }
+ elsif ($realtype eq 'HASH') {
+ my($k, $v, $pad, $lpad, $mname);
+ $out .= ($name =~ /^\%/) ? '(' : '{';
+ $pad = $s->{sep} . $s->{pad} . $s->{apad};
+ $lpad = $s->{apad};
+ ($name =~ /^\%(.*)$/) ? ($mname = "\$" . $1) :
+ # omit -> if $foo->[0]->{bar}, but not ${$foo->[0]}->{bar}
+ ($name =~ /^\\?[\%\@\*\$][^{].*[]}]$/) ? ($mname = $name) :
+ ($mname = $name . '->');
+ $mname .= '->' if $mname =~ /^\*.+\{[A-Z]+\}$/;
+ while (($k, $v) = each %$val) {
+ my $nk = $s->_dump($k, "");
+ $nk = $1 if !$s->{quotekeys} and $nk =~ /^[\"\']([A-Za-z_]\w*)[\"\']$/;
+ $sname = $mname . '{' . $nk . '}';
+ $out .= $pad . $ipad . $nk . " => ";
+
+ # temporarily alter apad
+ $s->{apad} .= (" " x (length($nk) + 4)) if $s->{indent} >= 2;
+ $out .= $s->_dump($val->{$k}, $sname) . ",";
+ $s->{apad} = $lpad if $s->{indent} >= 2;
+ }
+ if (substr($out, -1) eq ',') {
+ chop $out;
+ $out .= $pad . ($s->{xpad} x ($s->{level} - 1));
+ }
+ $out .= ($name =~ /^\%/) ? ')' : '}';
+ }
+ elsif ($realtype eq 'CODE') {
+ $out .= 'sub { "DUMMY" }';
+ carp "Encountered CODE ref, using dummy placeholder" if $s->{purity};
+ }
+ else {
+ croak "Can\'t handle $realtype type.";
+ }
+
+ if ($realpack) { # we have a blessed ref
+ $out .= ', \'' . $realpack . '\'' . ' )';
+ $out .= '->' . $s->{toaster} . '()' if $s->{toaster} ne '';
+ $s->{apad} = $blesspad;
+ }
+ $s->{level}--;
+
+ }
+ else { # simple scalar
+
+ my $ref = \$_[1];
+ # first, catalog the scalar
+ if ($name ne '') {
+ ($id) = ("$ref" =~ /\(([^\(]*)\)$/);
+ if (exists $s->{seen}{$id}) {
+ if ($s->{seen}{$id}[2]) {
+ $out = $s->{seen}{$id}[0];
+ #warn "[<$out]\n";
+ return "\${$out}";
+ }
+ }
+ else {
+ #warn "[>\\$name]\n";
+ $s->{seen}{$id} = ["\\$name", $ref];
+ }
+ }
+ if (ref($ref) eq 'GLOB' or "$ref" =~ /=GLOB\([^()]+\)$/) { # glob
+ my $name = substr($val, 1);
+ if ($name =~ /^[A-Za-z_][\w:]*$/) {
+ $name =~ s/^main::/::/;
+ $sname = $name;
+ }
+ else {
+ $sname = $s->_dump($name, "");
+ $sname = '{' . $sname . '}';
+ }
+ if ($s->{purity}) {
+ my $k;
+ local ($s->{level}) = 0;
+ for $k (qw(SCALAR ARRAY HASH)) {
+ my $gval = *$val{$k};
+ next unless defined $gval;
+ next if $k eq "SCALAR" && ! defined $$gval; # always there
+
+ # _dump can push into @post, so we hold our place using $postlen
+ my $postlen = scalar @post;
+ $post[$postlen] = "\*$sname = ";
+ local ($s->{apad}) = " " x length($post[$postlen]) if $s->{indent} >= 2;
+ $post[$postlen] .= $s->_dump($gval, "\*$sname\{$k\}");
+ }
+ }
+ $out .= '*' . $sname;
+ }
+ elsif (!defined($val)) {
+ $out .= "undef";
+ }
+ elsif ($val =~ /^-?[1-9]\d{0,8}$/) { # safe decimal number
+ $out .= $val;
+ }
+ else { # string
+ if ($s->{useqq}) {
+ $out .= qquote($val, $s->{useqq});
+ }
+ else {
+ $val =~ s/([\\\'])/\\$1/g;
+ $out .= '\'' . $val . '\'';
+ }
+ }
+ }
+ if ($id) {
+ # if we made it this far, $id was added to seen list at current
+ # level, so remove it to get deep copies
+ if ($s->{deepcopy}) {
+ delete($s->{seen}{$id});
+ }
+ elsif ($name) {
+ $s->{seen}{$id}[2] = 1;
+ }
+ }
+ return $out;
+}
+
+#
+# non-OO style of earlier version
+#
+sub Dumper {
+ return Data::Dumper->Dump([@_]);
+}
+
+#
+# same, only calls the XS version
+#
+sub DumperX {
+ return Data::Dumper->Dumpxs([@_], []);
+}
+
+sub Dumpf { return Data::Dumper->Dump(@_) }
+
+sub Dumpp { print Data::Dumper->Dump(@_) }
+
+#
+# reset the "seen" cache
+#
+sub Reset {
+ my($s) = shift;
+ $s->{seen} = {};
+ return $s;
+}
+
+sub Indent {
+ my($s, $v) = @_;
+ if (defined($v)) {
+ if ($v == 0) {
+ $s->{xpad} = "";
+ $s->{sep} = "";
+ }
+ else {
+ $s->{xpad} = " ";
+ $s->{sep} = "\n";
+ }
+ $s->{indent} = $v;
+ return $s;
+ }
+ else {
+ return $s->{indent};
+ }
+}
+
+sub Pad {
+ my($s, $v) = @_;
+ defined($v) ? (($s->{pad} = $v), return $s) : $s->{pad};
+}
+
+sub Varname {
+ my($s, $v) = @_;
+ defined($v) ? (($s->{varname} = $v), return $s) : $s->{varname};
+}
+
+sub Purity {
+ my($s, $v) = @_;
+ defined($v) ? (($s->{purity} = $v), return $s) : $s->{purity};
+}
+
+sub Useqq {
+ my($s, $v) = @_;
+ defined($v) ? (($s->{useqq} = $v), return $s) : $s->{useqq};
+}
+
+sub Terse {
+ my($s, $v) = @_;
+ defined($v) ? (($s->{terse} = $v), return $s) : $s->{terse};
+}
+
+sub Freezer {
+ my($s, $v) = @_;
+ defined($v) ? (($s->{freezer} = $v), return $s) : $s->{freezer};
+}
+
+sub Toaster {
+ my($s, $v) = @_;
+ defined($v) ? (($s->{toaster} = $v), return $s) : $s->{toaster};
+}
+
+sub Deepcopy {
+ my($s, $v) = @_;
+ defined($v) ? (($s->{deepcopy} = $v), return $s) : $s->{deepcopy};
+}
+
+sub Quotekeys {
+ my($s, $v) = @_;
+ defined($v) ? (($s->{quotekeys} = $v), return $s) : $s->{quotekeys};
+}
+
+sub Bless {
+ my($s, $v) = @_;
+ defined($v) ? (($s->{'bless'} = $v), return $s) : $s->{'bless'};
+}
+
+# used by qquote below
+my %esc = (
+ "\a" => "\\a",
+ "\b" => "\\b",
+ "\t" => "\\t",
+ "\n" => "\\n",
+ "\f" => "\\f",
+ "\r" => "\\r",
+ "\e" => "\\e",
+);
+
+# put a string value in double quotes
+sub qquote {
+ local($_) = shift;
+ s/([\\\"\@\$])/\\$1/g;
+ return qq("$_") unless /[^\040-\176]/; # fast exit
+
+ my $high = shift || "";
+ s/([\a\b\t\n\f\r\e])/$esc{$1}/g;
+
+ # no need for 3 digits in escape for these
+ s/([\0-\037])(?!\d)/'\\'.sprintf('%o',ord($1))/eg;
+
+ s/([\0-\037\177])/'\\'.sprintf('%03o',ord($1))/eg;
+ if ($high eq "iso8859") {
+ s/([\200-\240])/'\\'.sprintf('%o',ord($1))/eg;
+ } elsif ($high eq "utf8") {
+# use utf8;
+# $str =~ s/([^\040-\176])/sprintf "\\x{%04x}", ord($1)/ge;
+ } elsif ($high eq "8bit") {
+ # leave it as it is
+ } else {
+ s/([\0-\037\177-\377])/'\\'.sprintf('%03o',ord($1))/eg;
+ }
+ return qq("$_");
+}
+
+1;
+__END__
+
+=head1 NAME
+
+Data::Dumper - stringified perl data structures, suitable for both printing and C<eval>
+
+
+=head1 SYNOPSIS
+
+ use Data::Dumper;
+
+ # simple procedural interface
+ print Dumper($foo, $bar);
+
+ # extended usage with names
+ print Data::Dumper->Dump([$foo, $bar], [qw(foo *ary)]);
+
+ # configuration variables
+ {
+ local $Data::Dumper::Purity = 1;
+ eval Data::Dumper->Dump([$foo, $bar], [qw(foo *ary)]);
+ }
+
+ # OO usage
+ $d = Data::Dumper->new([$foo, $bar], [qw(foo *ary)]);
+ ...
+ print $d->Dump;
+ ...
+ $d->Purity(1)->Terse(1)->Deepcopy(1);
+ eval $d->Dump;
+
+
+=head1 DESCRIPTION
+
+Given a list of scalars or reference variables, writes out their contents in
+perl syntax. The references can also be objects. The contents of each
+variable are output in a single Perl statement. Handles self-referential
+structures correctly.
+
+The return value can be C<eval>ed to get back an identical copy of the
+original reference structure.
+
+Any references that are the same as one of those passed in will be named
+C<$VAR>I<n> (where I<n> is a numeric suffix), and other duplicate references
+to substructures within C<$VAR>I<n> will be appropriately labeled using arrow
+notation. You can specify names for individual values to be dumped if you
+use the C<Dump()> method, or you can change the default C<$VAR> prefix to
+something else. See C<$Data::Dumper::Varname> and C<$Data::Dumper::Terse>
+below.
+
+The default output of self-referential structures can be C<eval>ed, but the
+nested references to C<$VAR>I<n> will be undefined, since a recursive
+structure cannot be constructed using one Perl statement. You should set the
+C<Purity> flag to 1 to get additional statements that will correctly fill in
+these references.
+
+In the extended usage form, the references to be dumped can be given
+user-specified names. If a name begins with a C<*>, the output will
+describe the dereferenced type of the supplied reference for hashes and
+arrays, and coderefs. Output of names will be avoided where possible if
+the C<Terse> flag is set.
+
+In many cases, methods that are used to set the internal state of the
+object will return the object itself, so method calls can be conveniently
+chained together.
+
+Several styles of output are possible, all controlled by setting
+the C<Indent> flag. See L<Configuration Variables or Methods> below
+for details.
+
+
+=head2 Methods
+
+=over 4
+
+=item I<PACKAGE>->new(I<ARRAYREF [>, I<ARRAYREF]>)
+
+Returns a newly created C<Data::Dumper> object. The first argument is an
+anonymous array of values to be dumped. The optional second argument is an
+anonymous array of names for the values. The names need not have a leading
+C<$> sign, and must consist of alphanumeric characters. You can begin
+a name with a C<*> to specify that the dereferenced type must be dumped
+instead of the reference itself, for ARRAY and HASH references.
+
+The prefix specified by C<$Data::Dumper::Varname> will be used with a
+numeric suffix if the name for a value is undefined.
+
+Data::Dumper will catalog all references encountered while dumping the
+values. Cross-references (in the form of names of substructures in perl
+syntax) will be inserted at all possible points, preserving any structural
+interdependencies in the original set of values. Structure traversal is
+depth-first, and proceeds in order from the first supplied value to
+the last.
+
+=item I<$OBJ>->Dump I<or> I<PACKAGE>->Dump(I<ARRAYREF [>, I<ARRAYREF]>)
+
+Returns the stringified form of the values stored in the object (preserving
+the order in which they were supplied to C<new>), subject to the
+configuration options below. In an array context, it returns a list
+of strings corresponding to the supplied values.
+
+The second form, for convenience, simply calls the C<new> method on its
+arguments and then immediately dumps the resulting object.
+
+=item I<$OBJ>->Dumpxs I<or> I<PACKAGE>->Dumpxs(I<ARRAYREF [>, I<ARRAYREF]>)
+
+This method is available if you were able to compile and install the XSUB
+extension to C<Data::Dumper>. It is exactly identical to the C<Dump> method
+above, only about 4 to 5 times faster, since it is written entirely in C.
+
+=item I<$OBJ>->Seen(I<[HASHREF]>)
+
+Queries or adds to the internal table of already encountered references.
+You must use C<Reset> to explicitly clear the table if needed. Such
+references are not dumped; instead, their names are inserted wherever they
+are encountered subsequently. This is especially useful for properly
+dumping subroutine references.
+
+Expects an anonymous hash of name => value pairs. The same rules apply for names
+as in C<new>. If no argument is supplied, will return the "seen" list of
+name => value pairs, in an array context. Otherwise, returns the object
+itself.
+
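+As a minimal sketch (the C<helper> subroutine and C<$d> object here are
+only illustrative), pre-seeding a known code ref keeps the dump from
+emitting a dummy placeholder for it:
+
+ sub helper { 1 }
+ $d = Data::Dumper->new([ [ \&helper ] ], ['data']);
+ $d->Seen({ '*helper' => \&helper });  # the dump will say \&helper
+ print $d->Dump;
+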
+=item I<$OBJ>->Values(I<[ARRAYREF]>)
+
+Queries or replaces the internal array of values that will be dumped.
+When called without arguments, returns the values. Otherwise, returns the
+object itself.
+
+=item I<$OBJ>->Names(I<[ARRAYREF]>)
+
+Queries or replaces the internal array of user supplied names for the values
+that will be dumped. When called without arguments, returns the names.
+Otherwise, returns the object itself.
+
+=item I<$OBJ>->Reset
+
+Clears the internal table of "seen" references and returns the object
+itself.
+
+=back
+
+=head2 Functions
+
+=over 4
+
+=item Dumper(I<LIST>)
+
+Returns the stringified form of the values in the list, subject to the
+configuration options below. The values will be named C<$VAR>I<n> in the
+output, where I<n> is a numeric suffix. Will return a list of strings
+in an array context.
+
+=item DumperX(I<LIST>)
+
+Identical to the C<Dumper()> function above, but this calls the XSUB
+implementation. Only available if you were able to compile and install
+the XSUB extensions in C<Data::Dumper>.
+
+=back
+
+=head2 Configuration Variables or Methods
+
+Several configuration variables can be used to control the kind of output
+generated when using the procedural interface. These variables are usually
+C<local>ized in a block so that other parts of the code are not affected by
+the change.
+
+These variables determine the default state of the object created by calling
+the C<new> method, but cannot be used to alter the state of the object
+thereafter. The equivalent method names should be used instead to query
+or set the internal state of the object.
+
+The method forms return the object itself when called with arguments,
+so that they can be chained together nicely.
+
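+As a small illustrative sketch (C<$ref> is assumed to hold some reference),
+the same setting can be applied either by localizing the package variable
+for the procedural interface, or by chaining the equivalent method in the
+OO interface:
+
+ {
+   local $Data::Dumper::Indent = 1;
+   print Dumper($ref);            # affected only inside this block
+ }
+
+ # equivalent OO form, chaining the configuration method
+ print Data::Dumper->new([$ref], ['ref'])->Indent(1)->Dump;
+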
+=over 4
+
+=item $Data::Dumper::Indent I<or> I<$OBJ>->Indent(I<[NEWVAL]>)
+
+Controls the style of indentation. It can be set to 0, 1, 2 or 3. Style 0
+spews output without any newlines, indentation, or spaces between list
+items. It is the most compact format possible that can still be called
+valid perl. Style 1 outputs a readable form with newlines but no fancy
+indentation (each level in the structure is simply indented by a fixed
+amount of whitespace). Style 2 (the default) outputs a very readable form
+which takes into account the length of hash keys (so the hash values line
+up). Style 3 is like style 2, but also annotates the elements of arrays
+with their index (but the comment is on its own line, so array output
+consumes twice the number of lines).
+
+=item $Data::Dumper::Purity I<or> I<$OBJ>->Purity(I<[NEWVAL]>)
+
+Controls the degree to which the output can be C<eval>ed to recreate the
+supplied reference structures. Setting it to 1 will output additional perl
+statements that will correctly recreate nested references. The default is
+0.
+
+=item $Data::Dumper::Pad I<or> I<$OBJ>->Pad(I<[NEWVAL]>)
+
+Specifies the string that will be prefixed to every line of the output.
+Empty string by default.
+
+=item $Data::Dumper::Varname I<or> I<$OBJ>->Varname(I<[NEWVAL]>)
+
+Contains the prefix to use for tagging variable names in the output. The
+default is "VAR".
+
+=item $Data::Dumper::Useqq I<or> I<$OBJ>->Useqq(I<[NEWVAL]>)
+
+When set, enables the use of double quotes for representing string values.
+Whitespace other than space will be represented as C<[\n\t\r]>, "unsafe"
+characters will be backslashed, and unprintable characters will be output as
+quoted octal integers. Since setting this variable imposes a performance
+penalty, the default is 0. The C<Dumpxs()> method does not honor this
+flag yet.
+
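+A one-line sketch of the effect (the string is arbitrary):
+
+ $Data::Dumper::Useqq = 1;
+ print Dumper("tab\there");      # emits $VAR1 = "tab\there";
+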
+=item $Data::Dumper::Terse I<or> I<$OBJ>->Terse(I<[NEWVAL]>)
+
+When set, Data::Dumper will emit single, non-self-referential values as
+atoms/terms rather than statements. This means that the C<$VAR>I<n> names
+will be avoided where possible, but be advised that such output may not
+always be parseable by C<eval>.
+
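+A brief sketch (the array is arbitrary): combining C<Terse> with
+C<Indent> set to 0 yields a bare, single-line value without the
+C<$VAR>I<n> name:
+
+ $Data::Dumper::Terse = 1;
+ $Data::Dumper::Indent = 0;
+ print Dumper([1, 2, 3]), "\n";  # prints [1,2,3]
+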
+=item $Data::Dumper::Freezer I<or> $I<OBJ>->Freezer(I<[NEWVAL]>)
+
+Can be set to a method name, or to an empty string to disable the feature.
+Data::Dumper will invoke that method via the object before attempting to
+stringify it. This method can alter the contents of the object (if, for
+instance, it contains data allocated from C), and even rebless it in a
+different package. The client is responsible for making sure the specified
+method can be called via the object, and that the object ends up containing
+only perl data types after the method has been called. Defaults to an empty
+string.
+
+=item $Data::Dumper::Toaster I<or> $I<OBJ>->Toaster(I<[NEWVAL]>)
+
+Can be set to a method name, or to an empty string to disable the feature.
+Data::Dumper will emit a method call for any objects that are to be dumped
+using the syntax C<bless(DATA, CLASS)-E<gt>METHOD()>. Note that this means that
+the method specified will have to perform any modifications required on the
+object (like creating new state within it, and/or reblessing it in a
+different package) and then return it. The client is responsible for making
+sure the method can be called via the object, and that it returns a valid
+object. Defaults to an empty string.
+
+=item $Data::Dumper::Deepcopy I<or> $I<OBJ>->Deepcopy(I<[NEWVAL]>)
+
+Can be set to a boolean value to enable deep copies of structures.
+Cross-referencing will then only be done when absolutely essential
+(i.e., to break reference cycles). Default is 0.
+
+=item $Data::Dumper::Quotekeys I<or> $I<OBJ>->Quotekeys(I<[NEWVAL]>)
+
+Can be set to a boolean value to control whether hash keys are quoted.
+A false value will avoid quoting hash keys when they look like simple
+strings. Default is 1, which will always enclose hash keys in quotes.
+
+=item $Data::Dumper::Bless I<or> $I<OBJ>->Bless(I<[NEWVAL]>)
+
+Can be set to a string that specifies an alternative to the C<bless>
+builtin operator used to create objects. A function with the specified
+name should exist, and should accept the same arguments as the builtin.
+Default is C<bless>.
+
+=back
+
+=head2 Exports
+
+=over 4
+
+=item Dumper
+
+=back
+
+=head1 EXAMPLES
+
+Run these code snippets to get a quick feel for the behavior of this
+module. When you are through with these examples, you may want to
+add or change the various configuration variables described above,
+to see their behavior. (See the testsuite in the Data::Dumper
+distribution for more examples.)
+
+
+ use Data::Dumper;
+
+ package Foo;
+ sub new {bless {'a' => 1, 'b' => sub { return "foo" }}, $_[0]};
+
+ package Fuz; # a weird REF-REF-SCALAR object
+ sub new {bless \($_ = \ 'fu\'z'), $_[0]};
+
+ package main;
+ $foo = Foo->new;
+ $fuz = Fuz->new;
+ $boo = [ 1, [], "abcd", \*foo,
+ {1 => 'a', 023 => 'b', 0x45 => 'c'},
+ \\"p\q\'r", $foo, $fuz];
+
+ ########
+ # simple usage
+ ########
+
+ $bar = eval(Dumper($boo));
+ print($@) if $@;
+ print Dumper($boo), Dumper($bar); # pretty print (no array indices)
+
+ $Data::Dumper::Terse = 1; # don't output names where feasible
+ $Data::Dumper::Indent = 0; # turn off all pretty print
+ print Dumper($boo), "\n";
+
+ $Data::Dumper::Indent = 1; # mild pretty print
+ print Dumper($boo);
+
+ $Data::Dumper::Indent = 3; # pretty print with array indices
+ print Dumper($boo);
+
+ $Data::Dumper::Useqq = 1; # print strings in double quotes
+ print Dumper($boo);
+
+
+ ########
+ # recursive structures
+ ########
+
+ @c = ('c');
+ $c = \@c;
+ $b = {};
+ $a = [1, $b, $c];
+ $b->{a} = $a;
+ $b->{b} = $a->[1];
+ $b->{c} = $a->[2];
+ print Data::Dumper->Dump([$a,$b,$c], [qw(a b c)]);
+
+
+ $Data::Dumper::Purity = 1; # fill in the holes for eval
+ print Data::Dumper->Dump([$a, $b], [qw(*a b)]); # print as @a
+ print Data::Dumper->Dump([$b, $a], [qw(*b a)]); # print as %b
+
+
+ $Data::Dumper::Deepcopy = 1; # avoid cross-refs
+ print Data::Dumper->Dump([$b, $a], [qw(*b a)]);
+
+
+ $Data::Dumper::Purity = 0; # stop filling in the holes
+ print Data::Dumper->Dump([$b, $a], [qw(*b a)]);
+
+
+ ########
+ # object-oriented usage
+ ########
+
+ $d = Data::Dumper->new([$a,$b], [qw(a b)]);
+ $d->Seen({'*c' => $c}); # stash a ref without printing it
+ $d->Indent(3);
+ print $d->Dump;
+ $d->Reset->Purity(0); # empty the seen cache
+ print join "----\n", $d->Dump;
+
+
+ ########
+ # persistence
+ ########
+
+ package Foo;
+ sub new { bless { state => 'awake' }, shift }
+ sub Freeze {
+ my $s = shift;
+ print STDERR "preparing to sleep\n";
+ $s->{state} = 'asleep';
+ return bless $s, 'Foo::ZZZ';
+ }
+
+ package Foo::ZZZ;
+ sub Thaw {
+ my $s = shift;
+ print STDERR "waking up\n";
+ $s->{state} = 'awake';
+ return bless $s, 'Foo';
+ }
+
+ package Foo;
+ use Data::Dumper;
+ $a = Foo->new;
+ $b = Data::Dumper->new([$a], ['c']);
+ $b->Freezer('Freeze');
+ $b->Toaster('Thaw');
+ $c = $b->Dump;
+ print $c;
+ $d = eval $c;
+ print Data::Dumper->Dump([$d], ['d']);
+
+
+ ########
+ # symbol substitution (useful for recreating CODE refs)
+ ########
+
+ sub foo { print "foo speaking\n" }
+ *other = \&foo;
+ $bar = [ \&other ];
+ $d = Data::Dumper->new([\&other,$bar],['*other','bar']);
+ $d->Seen({ '*foo' => \&foo });
+ print $d->Dump;
+
+
+=head1 BUGS
+
+Due to limitations of Perl subroutine call semantics, you cannot pass an
+array or hash. Prepend it with a C<\> to pass its reference instead. This
+will be remedied in time, with the arrival of prototypes in later versions
+of Perl. For now, you need to use the extended usage form, and prepend the
+name with a C<*> to output it as a hash or array.
+
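+For example (a minimal sketch; C<%h> is just an illustrative hash), pass a
+reference and use a C<*>-prefixed name to have it dumped as a hash:
+
+ print Data::Dumper->Dump([\%h], ['*h']);   # output begins with %h = (
+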
+C<Data::Dumper> cheats with CODE references. If a code reference is
+encountered in the structure being processed, an anonymous subroutine that
+contains the string '"DUMMY"' will be inserted in its place, and a warning
+will be printed if C<Purity> is set. You can C<eval> the result, but bear
+in mind that the anonymous sub that gets created is just a placeholder.
+Someday, perl will have a switch to cache-on-demand the string
+representation of a compiled piece of code, I hope. If you have prior
+knowledge of all the code refs that your data structures are likely
+to have, you can use the C<Seen> method to pre-seed the internal reference
+table and make the dumped output point to them, instead. See L<EXAMPLES>
+above.
+
+The C<Useqq> flag is not honored by C<Dumpxs()> (it always outputs
+strings in single quotes).
+
+SCALAR objects have the weirdest looking C<bless> workaround.
+
+
+=head1 AUTHOR
+
+Gurusamy Sarathy gsar at umich.edu
+
+Copyright (c) 1996-98 Gurusamy Sarathy. All rights reserved.
+This program is free software; you can redistribute it and/or
+modify it under the same terms as Perl itself.
+
+
+=head1 VERSION
+
+Version 2.101 (30 Apr 1999)
+
+=head1 SEE ALSO
+
+perl(1)
+
+=cut
Added: trunk/orca/packages/Data-Dumper-2.101/Changes
==============================================================================
--- trunk/orca/packages/Data-Dumper-2.101/Changes (original)
+++ trunk/orca/packages/Data-Dumper-2.101/Changes Sat Jul 13 19:22:30 2002
@@ -0,0 +1,183 @@
+=head1 NAME
+
+HISTORY - public release history for Data::Dumper
+
+=head1 DESCRIPTION
+
+=over 8
+
+=item 2.101 (30 Apr 1999)
+
+Minor release to sync with version in 5.005_03. Fixes dump of
+dummy coderefs.
+
+=item 2.10 (31 Oct 1998)
+
+Bugfixes for dumping related undef values, globs, and better double
+quoting: three patches suggested by Gisle Aas <gisle at aas.no>.
+
+Escaping of single quotes in the XS version could get tripped up
+by the presence of nulls in the string. Fix suggested by
+Slaven Rezic <eserte at cs.tu-berlin.de>.
+
+Rather large scale reworking of the logic in how seen values
+are stashed. Anonymous scalars that may be encountered while
+traversing the structure are properly tracked, in case they become
+used in data dumped in a later pass. There used to be a problem
+with the previous logic that prevented such structures from being
+dumped correctly.
+
+Various additions to the testsuite.
+
+=item 2.09 (9 July 1998)
+
+Implement $Data::Dumper::Bless, suggested by Mark Daku <daku at nortel.ca>.
+
+=item 2.081 (15 January 1998)
+
+Minor release to fix Makefile.PL not accepting MakeMaker args.
+
+=item 2.08 (7 December 1997)
+
+Glob dumps don't output superfluous 'undef' anymore.
+
+Fixes from Gisle Aas <gisle at aas.no> to make Dumper() work with
+overloaded strings in recent perls, and his new testsuite.
+
+require 5.004.
+
+A separate flag to always quote hash keys (on by default).
+
+Recreating known CODE refs is now better supported.
+
+Changed flawed constant SCALAR bless workaround.
+
+=item 2.07 (7 December 1996)
+
+Dumpxs output is now exactly the same as Dump. It still doesn't
+honor C<Useqq> though.
+
+Regression tests test for identical output and C<eval>-ability.
+
+Bug in *GLOB{THING} output fixed.
+
+Other small enhancements.
+
+=item 2.06 (2 December 1996)
+
+Bugfix that was serious enough for a new release--the bug cripples
+MLDBM. The problem was "Attempt to modify readonly value..." failures
+that stemmed from a misguided SvPV_force() instead of a SvPV().
+
+=item 2.05 (2 December 1996)
+
+Fixed the type mismatch that was causing Dumpxs test to fail
+on 64-bit platforms.
+
+GLOB elements are dumped now when C<Purity> is set (using the
+*GLOB{THING} syntax).
+
+The C<Freezer> option can be set to a method name to call
+before probing objects for dumping. Some applications: objects with
+external data, can re-bless themselves into a transitional package;
+Objects the maintain ephemeral state (like open files) can put
+additional information in the object to facilitate persistence.
+
+The corresponding C<Toaster> option, if set, specifies
+the method call that will revive the frozen object.
+
+The C<Deepcopy> flag has been added to do just that.
+
+Dumper does more aggressive cataloging of SCALARs encountered
+within ARRAY/HASH structures. Thanks to Norman Gaywood
+<norm at godel.une.edu.au> for reporting the problem.
+
+Objects that C<overload> the '""' operator are now handled
+properly by the C<Dump> method.
+
+Significant additions to the testsuite.
+
+More documentation.
+
+=item 2.04beta (28 August 1996)
+
+Made dump of glob names respect C<Useqq> setting.
+
+[@$%] are now escaped when in double quotes.
+
+=item 2.03beta (26 August 1996)
+
+Fixed Dumpxs. It was appending trailing nulls to globnames.
+(reported by Randal Schwartz <merlyn at teleport.com>).
+
+Calling the C<Indent()> method on a dumper object now correctly
+resets the internal separator (reported by Curt Tilmes
+<curt at ltpmail.gsfc.nasa.gov>).
+
+Introduced a new C<Terse> option to suppress the C<$VAR>I<n> prefix.
+If the option is set, such prefixes are output only when
+absolutely essential.
+
+The C<Useqq> flag is supported (but not by the XSUB version
+yet).
+
+Embedded nulls in keys are now handled properly by Dumpxs.
+
+Dumper.xs now uses various integer types in perl.h (should
+make it compile without noise on 64-bit platforms, although
+I haven't been able to test this).
+
+All the dump methods now return a list of strings in a list
+context.
+
+
+=item 2.02beta (13 April 1996)
+
+Non portable sprintf usage in XS code fixed (thanks to
+Ulrich Pfeifer <pfeifer at charly.informatik.uni-dortmund.de>).
+
+
+=item 2.01beta (10 April 1996)
+
+Minor bugfix (single digit numbers were always getting quoted).
+
+
+=item 2.00beta (9 April 1996)
+
+C<Dumpxs> is now the exact XSUB equivalent of C<Dump>. The XS version
+is 4-5 times faster.
+
+C<require 5.002>.
+
+MLDBM example removed (as its own module, it has a separate CPAN
+reality now).
+
+Fixed bugs in handling keys with weird characters. Perl can be
+tripped up in its implicit quoting of the word before '=>'. The
+fix: C<Data::Dumper::Purity>, when set, always triggers quotes
+around hash keys.
+
+Andreas Koenig <k at anna.in-berlin.de> pointed out that handling octals
+is busted. His patch added.
+
+Dead code removed, other minor documentation fixes.
+
+
+=item 1.23 (3 Dec 1995)
+
+MLDBM example added.
+
+Several folks pointed out that quoting of ticks and backslashes
+in strings is missing. Added.
+
+Ian Phillips <ian at pipex.net> pointed out that numerics may lose
+precision without quotes. Fixed.
+
+
+=item 1.21 (20 Nov 1995)
+
+Last stable version I can remember.
+
+=back
+
+=cut
Added: trunk/orca/packages/Data-Dumper-2.101/README
==============================================================================
--- trunk/orca/packages/Data-Dumper-2.101/README (original)
+++ trunk/orca/packages/Data-Dumper-2.101/README Sat Jul 13 19:22:30 2002
@@ -0,0 +1,115 @@
+This is the README file for Data::Dumper, the Perl
+data-structure printing/stringification module.
+
+This is version 2.101. See the CHANGES section below for details on
+the new features and fixes in this release.
+
+Perl version 5.004 or later is required to build and use this module.
+
+
+---NOTE----NOTE----NOTE----NOTE----NOTE----NOTE----NOTE----NOTE---NOTE---
+| |
+| Data-Dumper comes standard with perl from version 5.004_71. |
+| |
+| Building the XSUB version *requires* a C compiler that groks ANSI |
+| prototypes. You may install the non-XSUB version if you're not so |
+| lucky. See the INSTALLATION section below. |
+| |
+---NOTE----NOTE----NOTE----NOTE----NOTE----NOTE----NOTE----NOTE---NOTE---
+
+
+You may find this module useful if you:
+
+ * are writing an application that must display perl data
+ structures.
+
+ * want to store some data structures to disk, in either a
+ compact or perl readable format (Dumper outputs pure perl,
+ so you don't have to invent your own portable data format, or
+ parse it; simply "do FILE" or read the data back in with
+ perl and eval() it). See the MLDBM module for an example of
+ one such use; a small sketch follows this list.
+
+ * want a simple, yet powerful, persistence engine that can be
+ quickly integrated into your application, and is a breeze to
+ debug.
+
+ * want a facility to make copies of data, or quickly find
+ differences between data structures.
+
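+As a rough sketch of the disk-storage case above (the file and variable
+names are only examples), a hash can be written out once and pulled back
+in later with "do":
+
+    use Data::Dumper;
+    $Data::Dumper::Purity = 1;         # emit fixup statements, eval-safe
+    open(OUT, "> config.pl") || die "can't write config.pl: $!";
+    print OUT Data::Dumper->Dump([\%config], ['*config']);
+    close(OUT);
+
+    # later, in another program:
+    do 'config.pl';                    # repopulates %config
+    die "reload failed: $@" if $@;
+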
+Dumper can be efficient if you are on a system supported by MakeMaker
+and xsubpp, the perl extension building tools. In this case, Dumper
+will build an XSUB extension that will be much faster than its perl
+counterpart (in my tests, about 4 to 5 times). In either case,
+you should be able to use this module without loss of functionality.
+
+See the embedded documentation in the module for more details.
+
+Comments and suggestions are always welcome.
+
+ - Sarathy.
+ gsar at umich.edu
+
+---------------------------------------------------------------------
+INSTALLATION
+
+Installation requires a functional MakeMaker and perl 5.004 or later. Do
+either of these:
+
+ a. If your platform has a C compiler AND is supported by xsubpp:
+
+ gzip -c -d Data-Dumper-2.101.tar.gz | tar xvf -
+ cd Data-Dumper-2.101
+ perl Makefile.PL
+ make test
+ make install
+
+ b. If you have no C compiler OR if you don't want the XSUB extension
+ (no loss of functionality, only speed):
+
+ gzip -c -d Data-Dumper-2.101.tar.gz | tar xvf -
+ cd Data-Dumper-2.101
+ perl Makefile.PL noxs # flips to non-xsub version
+ make test
+ make install
+
+ If you decide to go back to compiling the XSUB extension after this,
+ just do:
+
+ make distclean
+ perl Makefile.PL xs # flips back to xsub version
+ make test
+ make install
+
+---------------------------------------------------------------------
+CHANGES
+
+ 2.101 (30 Apr 1999)
+ Minor release to sync with version in 5.005_03. Fixes
+ dump of dummy coderefs.
+
+ 2.10 (31 Oct 1998)
+ Bugfixes for dumping related undef values, globs, and better
+ double quoting: three patches suggested by Gisle Aas
+ <gisle at aas.no>.
+
+ Escaping of single quotes in the XS version could get tripped
+ up by the presence of nulls in the string. Fix suggested by
+ Slaven Rezic <eserte at cs.tu-berlin.de>.
+
+ Rather large scale reworking of the logic in how seen values
+ are stashed. Anonymous scalars that may be encountered while
+ traversing the structure are properly tracked, in case they
+ become used in data dumped in a later pass. There used to be a
+ problem with the previous logic that prevented such structures
+ from being dumped correctly.
+
+ Various additions to the testsuite.
+
+ 2.09 (9 July 1998)
+ Implement $Data::Dumper::Bless, suggested by Mark Daku
+ <daku at nortel.ca>.
+
+ 2.081 (15 January 1998)
+ Minor release to fix Makefile.PL not accepting
+ MakeMaker args.
Modified: trunk/orca/packages/rrdtool-1.0.7.2/configure
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/configure (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/configure Sat Jul 13 19:22:31 2002
@@ -11,6 +11,17 @@
ac_help=
ac_default_prefix=/usr/local
# Any additions from configure.in:
+ac_default_prefix=/usr/local/rrdtool-1.0.7
+ac_help="$ac_help
+ --enable-shared[=PKGS] build shared libraries [default=no]"
+ac_help="$ac_help
+ --enable-static[=PKGS] build static libraries [default=yes]"
+ac_help="$ac_help
+ --enable-fast-install[=PKGS] optimize for fast installation [default=yes]"
+ac_help="$ac_help
+ --with-gnu-ld assume the C compiler uses GNU ld [default=no]"
+ac_help="$ac_help
+ --disable-libtool-lock avoid locking (might break parallel builds)"
# Initialize some variables set by options.
# The variables have the same names as the options, with
@@ -522,11 +533,417 @@
+ac_aux_dir=
+for ac_dir in config $srcdir/config; do
+ if test -f $ac_dir/install-sh; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install-sh -c"
+ break
+ elif test -f $ac_dir/install.sh; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install.sh -c"
+ break
+ fi
+done
+if test -z "$ac_aux_dir"; then
+ { echo "configure: error: can not find install-sh or install.sh in config $srcdir/config" 1>&2; exit 1; }
+fi
+ac_config_guess=$ac_aux_dir/config.guess
+ac_config_sub=$ac_aux_dir/config.sub
+ac_configure=$ac_aux_dir/configure # This should be Cygnus configure.
+
+
+
+# Do some error checking and defaulting for the host and target type.
+# The inputs are:
+# configure --host=HOST --target=TARGET --build=BUILD NONOPT
+#
+# The rules are:
+# 1. You are not allowed to specify --host, --target, and nonopt at the
+# same time.
+# 2. Host defaults to nonopt.
+# 3. If nonopt is not specified, then host defaults to the current host,
+# as determined by config.guess.
+# 4. Target and build default to nonopt.
+# 5. If nonopt is not specified, then target and build default to host.
+
+# The aliases save the names the user supplied, while $host etc.
+# will get canonicalized.
+case $host---$target---$nonopt in
+NONE---*---* | *---NONE---* | *---*---NONE) ;;
+*) { echo "configure: error: can only configure for one host and one target at a time" 1>&2; exit 1; } ;;
+esac
+
+
+# Make sure we can run config.sub.
+if ${CONFIG_SHELL-/bin/sh} $ac_config_sub sun4 >/dev/null 2>&1; then :
+else { echo "configure: error: can not run $ac_config_sub" 1>&2; exit 1; }
+fi
+
+echo $ac_n "checking host system type""... $ac_c" 1>&6
+echo "configure:585: checking host system type" >&5
+
+host_alias=$host
+case "$host_alias" in
+NONE)
+ case $nonopt in
+ NONE)
+ if host_alias=`${CONFIG_SHELL-/bin/sh} $ac_config_guess`; then :
+ else { echo "configure: error: can not guess host type; you must specify one" 1>&2; exit 1; }
+ fi ;;
+ *) host_alias=$nonopt ;;
+ esac ;;
+esac
+
+host=`${CONFIG_SHELL-/bin/sh} $ac_config_sub $host_alias`
+host_cpu=`echo $host | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'`
+host_vendor=`echo $host | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'`
+host_os=`echo $host | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'`
+echo "$ac_t""$host" 1>&6
+
+echo $ac_n "checking target system type""... $ac_c" 1>&6
+echo "configure:606: checking target system type" >&5
+
+target_alias=$target
+case "$target_alias" in
+NONE)
+ case $nonopt in
+ NONE) target_alias=$host_alias ;;
+ *) target_alias=$nonopt ;;
+ esac ;;
+esac
+
+target=`${CONFIG_SHELL-/bin/sh} $ac_config_sub $target_alias`
+target_cpu=`echo $target | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'`
+target_vendor=`echo $target | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'`
+target_os=`echo $target | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'`
+echo "$ac_t""$target" 1>&6
+
+echo $ac_n "checking build system type""... $ac_c" 1>&6
+echo "configure:624: checking build system type" >&5
+
+build_alias=$build
+case "$build_alias" in
+NONE)
+ case $nonopt in
+ NONE) build_alias=$host_alias ;;
+ *) build_alias=$nonopt ;;
+ esac ;;
+esac
+
+build=`${CONFIG_SHELL-/bin/sh} $ac_config_sub $build_alias`
+build_cpu=`echo $build | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'`
+build_vendor=`echo $build | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'`
+build_os=`echo $build | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'`
+echo "$ac_t""$build" 1>&6
+
+test "$host_alias" != "$target_alias" &&
+ test "$program_prefix$program_suffix$program_transform_name" = \
+ NONENONEs,x,x, &&
+ program_prefix=${target_alias}-
+
+
+# Find a good install program. We prefer a C program (faster),
+# so one script is as good as another. But avoid the broken or
+# incompatible versions:
+# SysV /etc/install, /usr/sbin/install
+# SunOS /usr/etc/install
+# IRIX /sbin/install
+# AIX /bin/install
+# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag
+# AFS /usr/afsws/bin/install, which mishandles nonexistent args
+# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff"
+# ./install, which can be erroneously created by make from ./install.sh.
+echo $ac_n "checking for a BSD compatible install""... $ac_c" 1>&6
+echo "configure:659: checking for a BSD compatible install" >&5
+if test -z "$INSTALL"; then
+if eval "test \"`echo '$''{'ac_cv_path_install'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ IFS="${IFS= }"; ac_save_IFS="$IFS"; IFS=":"
+ for ac_dir in $PATH; do
+ # Account for people who put trailing slashes in PATH elements.
+ case "$ac_dir/" in
+ /|./|.//|/etc/*|/usr/sbin/*|/usr/etc/*|/sbin/*|/usr/afsws/bin/*|/usr/ucb/*) ;;
+ *)
+ # OSF1 and SCO ODT 3.0 have their own names for install.
+ # Don't use installbsd from OSF since it installs stuff as root
+ # by default.
+ for ac_prog in ginstall scoinst install; do
+ if test -f $ac_dir/$ac_prog; then
+ if test $ac_prog = install &&
+ grep dspmsg $ac_dir/$ac_prog >/dev/null 2>&1; then
+ # AIX install. It has an incompatible calling convention.
+ :
+ else
+ ac_cv_path_install="$ac_dir/$ac_prog -c"
+ break 2
+ fi
+ fi
+ done
+ ;;
+ esac
+ done
+ IFS="$ac_save_IFS"
+
+fi
+ if test "${ac_cv_path_install+set}" = set; then
+ INSTALL="$ac_cv_path_install"
+ else
+ # As a last resort, use the slow shell script. We don't cache a
+ # path for INSTALL within a source directory, because that will
+ # break other packages using the cache if that directory is
+ # removed, or if the path is relative.
+ INSTALL="$ac_install_sh"
+ fi
+fi
+echo "$ac_t""$INSTALL" 1>&6
+
+# Use test -z because SunOS4 sh mishandles braces in ${var-val}.
+# It thinks the first close brace ends the variable substitution.
+test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}'
+
+test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL_PROGRAM}'
+
+test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
+
+echo $ac_n "checking whether build environment is sane""... $ac_c" 1>&6
+echo "configure:712: checking whether build environment is sane" >&5
+# Just in case
+sleep 1
+echo timestamp > conftestfile
+# Do `set' in a subshell so we don't clobber the current shell's
+# arguments. Must try -L first in case configure is actually a
+# symlink; some systems play weird games with the mod time of symlinks
+# (eg FreeBSD returns the mod time of the symlink's containing
+# directory).
+if (
+ set X `ls -Lt $srcdir/configure conftestfile 2> /dev/null`
+ if test "$*" = "X"; then
+ # -L didn't work.
+ set X `ls -t $srcdir/configure conftestfile`
+ fi
+ if test "$*" != "X $srcdir/configure conftestfile" \
+ && test "$*" != "X conftestfile $srcdir/configure"; then
+
+ # If neither matched, then we have a broken ls. This can happen
+ # if, for instance, CONFIG_SHELL is bash and it inherits a
+ # broken ls alias from the environment. This has actually
+ # happened. Such a system could not be considered "sane".
+ { echo "configure: error: ls -t appears to fail. Make sure there is not a broken
+alias in your environment" 1>&2; exit 1; }
+ fi
+
+ test "$2" = conftestfile
+ )
+then
+ # Ok.
+ :
+else
+ { echo "configure: error: newly created file is older than distributed files!
+Check your system clock" 1>&2; exit 1; }
+fi
+rm -f conftest*
+echo "$ac_t""yes" 1>&6
+if test "$program_transform_name" = s,x,x,; then
+ program_transform_name=
+else
+ # Double any \ or $. echo might interpret backslashes.
+ cat <<\EOF_SED > conftestsed
+s,\\,\\\\,g; s,\$,$$,g
+EOF_SED
+ program_transform_name="`echo $program_transform_name|sed -f conftestsed`"
+ rm -f conftestsed
+fi
+test "$program_prefix" != NONE &&
+ program_transform_name="s,^,${program_prefix},; $program_transform_name"
+# Use a double $ so make ignores it.
+test "$program_suffix" != NONE &&
+ program_transform_name="s,\$\$,${program_suffix},; $program_transform_name"
+
+# sed with no file args requires a program.
+test "$program_transform_name" = "" && program_transform_name="s,x,x,"
+
+echo $ac_n "checking whether ${MAKE-make} sets \${MAKE}""... $ac_c" 1>&6
+echo "configure:769: checking whether ${MAKE-make} sets \${MAKE}" >&5
+set dummy ${MAKE-make}; ac_make=`echo "$2" | sed 'y%./+-%__p_%'`
+if eval "test \"`echo '$''{'ac_cv_prog_make_${ac_make}_set'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftestmake <<\EOF
+all:
+ @echo 'ac_maketemp="${MAKE}"'
+EOF
+# GNU make sometimes prints "make[1]: Entering...", which would confuse us.
+eval `${MAKE-make} -f conftestmake 2>/dev/null | grep temp=`
+if test -n "$ac_maketemp"; then
+ eval ac_cv_prog_make_${ac_make}_set=yes
+else
+ eval ac_cv_prog_make_${ac_make}_set=no
+fi
+rm -f conftestmake
+fi
+if eval "test \"`echo '$ac_cv_prog_make_'${ac_make}_set`\" = yes"; then
+ echo "$ac_t""yes" 1>&6
+ SET_MAKE=
+else
+ echo "$ac_t""no" 1>&6
+ SET_MAKE="MAKE=${MAKE-make}"
+fi
+
+
+PACKAGE=rrdtool
+
+VERSION=1.0.7
+
+if test "`cd $srcdir && pwd`" != "`pwd`" && test -f $srcdir/config.status; then
+ { echo "configure: error: source directory already configured; run "make distclean" there first" 1>&2; exit 1; }
+fi
+cat >> confdefs.h <<EOF
+#define PACKAGE "$PACKAGE"
+EOF
+
+cat >> confdefs.h <<EOF
+#define VERSION "$VERSION"
+EOF
+
+
+
+missing_dir=`cd $ac_aux_dir && pwd`
+echo $ac_n "checking for working aclocal""... $ac_c" 1>&6
+echo "configure:815: checking for working aclocal" >&5
+# Run test in a subshell; some versions of sh will print an error if
+# an executable is not found, even if stderr is redirected.
+# Redirect stdin to placate older versions of autoconf. Sigh.
+if (aclocal --version) < /dev/null > /dev/null 2>&1; then
+ ACLOCAL=aclocal
+ echo "$ac_t""found" 1>&6
+else
+ ACLOCAL="$missing_dir/missing aclocal"
+ echo "$ac_t""missing" 1>&6
+fi
+
+echo $ac_n "checking for working autoconf""... $ac_c" 1>&6
+echo "configure:828: checking for working autoconf" >&5
+# Run test in a subshell; some versions of sh will print an error if
+# an executable is not found, even if stderr is redirected.
+# Redirect stdin to placate older versions of autoconf. Sigh.
+if (autoconf --version) < /dev/null > /dev/null 2>&1; then
+ AUTOCONF=autoconf
+ echo "$ac_t""found" 1>&6
+else
+ AUTOCONF="$missing_dir/missing autoconf"
+ echo "$ac_t""missing" 1>&6
+fi
+
+echo $ac_n "checking for working automake""... $ac_c" 1>&6
+echo "configure:841: checking for working automake" >&5
+# Run test in a subshell; some versions of sh will print an error if
+# an executable is not found, even if stderr is redirected.
+# Redirect stdin to placate older versions of autoconf. Sigh.
+if (automake --version) < /dev/null > /dev/null 2>&1; then
+ AUTOMAKE=automake
+ echo "$ac_t""found" 1>&6
+else
+ AUTOMAKE="$missing_dir/missing automake"
+ echo "$ac_t""missing" 1>&6
+fi
+
+echo $ac_n "checking for working autoheader""... $ac_c" 1>&6
+echo "configure:854: checking for working autoheader" >&5
+# Run test in a subshell; some versions of sh will print an error if
+# an executable is not found, even if stderr is redirected.
+# Redirect stdin to placate older versions of autoconf. Sigh.
+if (autoheader --version) < /dev/null > /dev/null 2>&1; then
+ AUTOHEADER=autoheader
+ echo "$ac_t""found" 1>&6
+else
+ AUTOHEADER="$missing_dir/missing autoheader"
+ echo "$ac_t""missing" 1>&6
+fi
+
+echo $ac_n "checking for working makeinfo""... $ac_c" 1>&6
+echo "configure:867: checking for working makeinfo" >&5
+# Run test in a subshell; some versions of sh will print an error if
+# an executable is not found, even if stderr is redirected.
+# Redirect stdin to placate older versions of autoconf. Sigh.
+if (makeinfo --version) < /dev/null > /dev/null 2>&1; then
+ MAKEINFO=makeinfo
+ echo "$ac_t""found" 1>&6
+else
+ MAKEINFO="$missing_dir/missing makeinfo"
+ echo "$ac_t""missing" 1>&6
+fi
+
+
+
+
+
+
+
+
+
+
+
+CGI_LIB_DIR=cgilib-0.4
+GD_LIB_DIR=gd1.3
+PNG_LIB_DIR=libpng-1.0.3
+ZLIB_LIB_DIR=zlib-1.1.3
+
+
+
+
+
+
+
+# Extract the first word of "perl", so it can be a program name with args.
+set dummy perl; ac_word=$2
+echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
+echo "configure:903: checking for $ac_word" >&5
+if eval "test \"`echo '$''{'ac_cv_path_PERL'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ case "$PERL" in
+ /*)
+ ac_cv_path_PERL="$PERL" # Let the user override the test with a path.
+ ;;
+ ?:/*)
+ ac_cv_path_PERL="$PERL" # Let the user override the test with a dos path.
+ ;;
+ *)
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
+ ac_dummy="$PATH"
+ for ac_dir in $ac_dummy; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/$ac_word; then
+ ac_cv_path_PERL="$ac_dir/$ac_word"
+ break
+ fi
+ done
+ IFS="$ac_save_ifs"
+ test -z "$ac_cv_path_PERL" && ac_cv_path_PERL="no"
+ ;;
+esac
+fi
+PERL="$ac_cv_path_PERL"
+if test -n "$PERL"; then
+ echo "$ac_t""$PERL" 1>&6
+else
+ echo "$ac_t""no" 1>&6
+fi
+
+if test "x$PERL" = "nox"; then
+ COMP_PERL=
+else
+ COMP_PERL="perl_piped perl_shared"
+fi
+
+
# Extract the first word of "gcc", so it can be a program name with args.
set dummy gcc; ac_word=$2
echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:530: checking for $ac_word" >&5
+echo "configure:947: checking for $ac_word" >&5
if eval "test \"`echo '$''{'ac_cv_prog_CC'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
@@ -556,7 +973,7 @@
# Extract the first word of "cc", so it can be a program name with args.
set dummy cc; ac_word=$2
echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:560: checking for $ac_word" >&5
+echo "configure:977: checking for $ac_word" >&5
if eval "test \"`echo '$''{'ac_cv_prog_CC'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
@@ -607,7 +1024,7 @@
# Extract the first word of "cl", so it can be a program name with args.
set dummy cl; ac_word=$2
echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:611: checking for $ac_word" >&5
+echo "configure:1028: checking for $ac_word" >&5
if eval "test \"`echo '$''{'ac_cv_prog_CC'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
@@ -639,7 +1056,7 @@
fi
echo $ac_n "checking whether the C compiler ($CC $CFLAGS $LDFLAGS) works""... $ac_c" 1>&6
-echo "configure:643: checking whether the C compiler ($CC $CFLAGS $LDFLAGS) works" >&5
+echo "configure:1060: checking whether the C compiler ($CC $CFLAGS $LDFLAGS) works" >&5
ac_ext=c
# CFLAGS is not in ac_cpp because -g, -O, etc. are not valid cpp options.
@@ -650,12 +1067,12 @@
cat > conftest.$ac_ext << EOF
-#line 654 "configure"
+#line 1071 "configure"
#include "confdefs.h"
main(){return(0);}
EOF
-if { (eval echo configure:659: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+if { (eval echo configure:1076: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
ac_cv_prog_cc_works=yes
# If we can't run a trivial program, we are probably using a cross compiler.
if (./conftest; exit) 2>/dev/null; then
@@ -681,12 +1098,12 @@
{ echo "configure: error: installation or configuration problem: C compiler cannot create executables." 1>&2; exit 1; }
fi
echo $ac_n "checking whether the C compiler ($CC $CFLAGS $LDFLAGS) is a cross-compiler""... $ac_c" 1>&6
-echo "configure:685: checking whether the C compiler ($CC $CFLAGS $LDFLAGS) is a cross-compiler" >&5
+echo "configure:1102: checking whether the C compiler ($CC $CFLAGS $LDFLAGS) is a cross-compiler" >&5
echo "$ac_t""$ac_cv_prog_cc_cross" 1>&6
cross_compiling=$ac_cv_prog_cc_cross
echo $ac_n "checking whether we are using GNU C""... $ac_c" 1>&6
-echo "configure:690: checking whether we are using GNU C" >&5
+echo "configure:1107: checking whether we are using GNU C" >&5
if eval "test \"`echo '$''{'ac_cv_prog_gcc'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
@@ -695,7 +1112,7 @@
yes;
#endif
EOF
-if { ac_try='${CC-cc} -E conftest.c'; { (eval echo configure:699: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }; } | egrep yes >/dev/null 2>&1; then
+if { ac_try='${CC-cc} -E conftest.c'; { (eval echo configure:1116: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }; } | egrep yes >/dev/null 2>&1; then
ac_cv_prog_gcc=yes
else
ac_cv_prog_gcc=no
@@ -714,7 +1131,7 @@
ac_save_CFLAGS="$CFLAGS"
CFLAGS=
echo $ac_n "checking whether ${CC-cc} accepts -g""... $ac_c" 1>&6
-echo "configure:718: checking whether ${CC-cc} accepts -g" >&5
+echo "configure:1135: checking whether ${CC-cc} accepts -g" >&5
if eval "test \"`echo '$''{'ac_cv_prog_cc_g'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
@@ -746,7 +1163,7 @@
fi
echo $ac_n "checking how to run the C preprocessor""... $ac_c" 1>&6
-echo "configure:750: checking how to run the C preprocessor" >&5
+echo "configure:1167: checking how to run the C preprocessor" >&5
# On Suns, sometimes $CPP names a directory.
if test -n "$CPP" && test -d "$CPP"; then
CPP=
@@ -761,13 +1178,13 @@
# On the NeXT, cc -E runs the code through the compiler's parser,
# not just through cpp.
cat > conftest.$ac_ext <<EOF
-#line 765 "configure"
+#line 1182 "configure"
#include "confdefs.h"
#include <assert.h>
Syntax Error
EOF
ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
-{ (eval echo configure:771: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+{ (eval echo configure:1188: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
if test -z "$ac_err"; then
:
@@ -778,13 +1195,13 @@
rm -rf conftest*
CPP="${CC-cc} -E -traditional-cpp"
cat > conftest.$ac_ext <<EOF
-#line 782 "configure"
+#line 1199 "configure"
#include "confdefs.h"
#include <assert.h>
Syntax Error
EOF
ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
-{ (eval echo configure:788: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+{ (eval echo configure:1205: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
if test -z "$ac_err"; then
:
@@ -795,13 +1212,13 @@
rm -rf conftest*
CPP="${CC-cc} -nologo -E"
cat > conftest.$ac_ext <<EOF
-#line 799 "configure"
+#line 1216 "configure"
#include "confdefs.h"
#include <assert.h>
Syntax Error
EOF
ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
-{ (eval echo configure:805: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+{ (eval echo configure:1222: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
if test -z "$ac_err"; then
:
@@ -819,147 +1236,87 @@
rm -f conftest*
ac_cv_prog_CPP="$CPP"
fi
- CPP="$ac_cv_prog_CPP"
-else
- ac_cv_prog_CPP="$CPP"
-fi
-echo "$ac_t""$CPP" 1>&6
-
-ac_aux_dir=
-for ac_dir in $srcdir $srcdir/.. $srcdir/../..; do
- if test -f $ac_dir/install-sh; then
- ac_aux_dir=$ac_dir
- ac_install_sh="$ac_aux_dir/install-sh -c"
- break
- elif test -f $ac_dir/install.sh; then
- ac_aux_dir=$ac_dir
- ac_install_sh="$ac_aux_dir/install.sh -c"
- break
- fi
-done
-if test -z "$ac_aux_dir"; then
- { echo "configure: error: can not find install-sh or install.sh in $srcdir $srcdir/.. $srcdir/../.." 1>&2; exit 1; }
-fi
-ac_config_guess=$ac_aux_dir/config.guess
-ac_config_sub=$ac_aux_dir/config.sub
-ac_configure=$ac_aux_dir/configure # This should be Cygnus configure.
-
-# Find a good install program. We prefer a C program (faster),
-# so one script is as good as another. But avoid the broken or
-# incompatible versions:
-# SysV /etc/install, /usr/sbin/install
-# SunOS /usr/etc/install
-# IRIX /sbin/install
-# AIX /bin/install
-# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag
-# AFS /usr/afsws/bin/install, which mishandles nonexistent args
-# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff"
-# ./install, which can be erroneously created by make from ./install.sh.
-echo $ac_n "checking for a BSD compatible install""... $ac_c" 1>&6
-echo "configure:860: checking for a BSD compatible install" >&5
-if test -z "$INSTALL"; then
-if eval "test \"`echo '$''{'ac_cv_path_install'+set}'`\" = set"; then
- echo $ac_n "(cached) $ac_c" 1>&6
-else
- IFS="${IFS= }"; ac_save_IFS="$IFS"; IFS=":"
- for ac_dir in $PATH; do
- # Account for people who put trailing slashes in PATH elements.
- case "$ac_dir/" in
- /|./|.//|/etc/*|/usr/sbin/*|/usr/etc/*|/sbin/*|/usr/afsws/bin/*|/usr/ucb/*) ;;
- *)
- # OSF1 and SCO ODT 3.0 have their own names for install.
- # Don't use installbsd from OSF since it installs stuff as root
- # by default.
- for ac_prog in ginstall scoinst install; do
- if test -f $ac_dir/$ac_prog; then
- if test $ac_prog = install &&
- grep dspmsg $ac_dir/$ac_prog >/dev/null 2>&1; then
- # AIX install. It has an incompatible calling convention.
- :
- else
- ac_cv_path_install="$ac_dir/$ac_prog -c"
- break 2
- fi
- fi
- done
- ;;
- esac
- done
- IFS="$ac_save_IFS"
-
-fi
- if test "${ac_cv_path_install+set}" = set; then
- INSTALL="$ac_cv_path_install"
- else
- # As a last resort, use the slow shell script. We don't cache a
- # path for INSTALL within a source directory, because that will
- # break other packages using the cache if that directory is
- # removed, or if the path is relative.
- INSTALL="$ac_install_sh"
- fi
-fi
-echo "$ac_t""$INSTALL" 1>&6
-
-# Use test -z because SunOS4 sh mishandles braces in ${var-val}.
-# It thinks the first close brace ends the variable substitution.
-test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}'
-
-test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL_PROGRAM}'
-
-test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
-
-echo $ac_n "checking whether ln -s works""... $ac_c" 1>&6
-echo "configure:913: checking whether ln -s works" >&5
-if eval "test \"`echo '$''{'ac_cv_prog_LN_S'+set}'`\" = set"; then
- echo $ac_n "(cached) $ac_c" 1>&6
-else
- rm -f conftestdata
-if ln -s X conftestdata 2>/dev/null
-then
- rm -f conftestdata
- ac_cv_prog_LN_S="ln -s"
-else
- ac_cv_prog_LN_S=ln
-fi
-fi
-LN_S="$ac_cv_prog_LN_S"
-if test "$ac_cv_prog_LN_S" = "ln -s"; then
- echo "$ac_t""yes" 1>&6
+ CPP="$ac_cv_prog_CPP"
else
- echo "$ac_t""no" 1>&6
+ ac_cv_prog_CPP="$CPP"
fi
+echo "$ac_t""$CPP" 1>&6
-echo $ac_n "checking whether ${MAKE-make} sets \${MAKE}""... $ac_c" 1>&6
-echo "configure:934: checking whether ${MAKE-make} sets \${MAKE}" >&5
-set dummy ${MAKE-make}; ac_make=`echo "$2" | sed 'y%./+-%__p_%'`
-if eval "test \"`echo '$''{'ac_cv_prog_make_${ac_make}_set'+set}'`\" = set"; then
- echo $ac_n "(cached) $ac_c" 1>&6
-else
- cat > conftestmake <<\EOF
-all:
- @echo 'ac_maketemp="${MAKE}"'
-EOF
-# GNU make sometimes prints "make[1]: Entering...", which would confuse us.
-eval `${MAKE-make} -f conftestmake 2>/dev/null | grep temp=`
-if test -n "$ac_maketemp"; then
- eval ac_cv_prog_make_${ac_make}_set=yes
+
+# Check whether --enable-shared or --disable-shared was given.
+if test "${enable_shared+set}" = set; then
+ enableval="$enable_shared"
+ p=${PACKAGE-default}
+case "$enableval" in
+yes) enable_shared=yes ;;
+no) enable_shared=no ;;
+*)
+ enable_shared=no
+ # Look at the argument we got. We use all the common list separators.
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}:,"
+ for pkg in $enableval; do
+ if test "X$pkg" = "X$p"; then
+ enable_shared=yes
+ fi
+ done
+ IFS="$ac_save_ifs"
+ ;;
+esac
else
- eval ac_cv_prog_make_${ac_make}_set=no
+ enable_shared=no
fi
-rm -f conftestmake
+
+
+# Check whether --enable-static or --disable-static was given.
+if test "${enable_static+set}" = set; then
+ enableval="$enable_static"
+ p=${PACKAGE-default}
+case "$enableval" in
+yes) enable_static=yes ;;
+no) enable_static=no ;;
+*)
+ enable_static=no
+ # Look at the argument we got. We use all the common list separators.
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}:,"
+ for pkg in $enableval; do
+ if test "X$pkg" = "X$p"; then
+ enable_static=yes
+ fi
+ done
+ IFS="$ac_save_ifs"
+ ;;
+esac
+else
+ enable_static=yes
fi
-if eval "test \"`echo '$ac_cv_prog_make_'${ac_make}_set`\" = yes"; then
- echo "$ac_t""yes" 1>&6
- SET_MAKE=
+
+# Check whether --enable-fast-install or --disable-fast-install was given.
+if test "${enable_fast_install+set}" = set; then
+ enableval="$enable_fast_install"
+ p=${PACKAGE-default}
+case "$enableval" in
+yes) enable_fast_install=yes ;;
+no) enable_fast_install=no ;;
+*)
+ enable_fast_install=no
+ # Look at the argument we got. We use all the common list separators.
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}:,"
+ for pkg in $enableval; do
+ if test "X$pkg" = "X$p"; then
+ enable_fast_install=yes
+ fi
+ done
+ IFS="$ac_save_ifs"
+ ;;
+esac
else
- echo "$ac_t""no" 1>&6
- SET_MAKE="MAKE=${MAKE-make}"
+ enable_fast_install=yes
fi
# Extract the first word of "ranlib", so it can be a program name with args.
set dummy ranlib; ac_word=$2
echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:963: checking for $ac_word" >&5
+echo "configure:1320: checking for $ac_word" >&5
if eval "test \"`echo '$''{'ac_cv_prog_RANLIB'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
@@ -986,99 +1343,321 @@
echo "$ac_t""no" 1>&6
fi
-# Extract the first word of "perl", so it can be a program name with args.
-set dummy perl; ac_word=$2
-echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:993: checking for $ac_word" >&5
-if eval "test \"`echo '$''{'ac_cv_path_PERL'+set}'`\" = set"; then
+# Check whether --with-gnu-ld or --without-gnu-ld was given.
+if test "${with_gnu_ld+set}" = set; then
+ withval="$with_gnu_ld"
+ test "$withval" = no || with_gnu_ld=yes
+else
+ with_gnu_ld=no
+fi
+
+ac_prog=ld
+if test "$ac_cv_prog_gcc" = yes; then
+ # Check if gcc -print-prog-name=ld gives a path.
+ echo $ac_n "checking for ld used by GCC""... $ac_c" 1>&6
+echo "configure:1359: checking for ld used by GCC" >&5
+ ac_prog=`($CC -print-prog-name=ld) 2>&5`
+ case "$ac_prog" in
+ # Accept absolute paths.
+ [\\/]* | [A-Za-z]:[\\/]*)
+ re_direlt='/[^/][^/]*/\.\./'
+ # Canonicalize the path of ld
+ ac_prog=`echo $ac_prog| sed 's%\\\\%/%g'`
+ while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do
+ ac_prog=`echo $ac_prog| sed "s%$re_direlt%/%"`
+ done
+ test -z "$LD" && LD="$ac_prog"
+ ;;
+ "")
+ # If it fails, then pretend we aren't using GCC.
+ ac_prog=ld
+ ;;
+ *)
+ # If it is relative, then search for the first ld in PATH.
+ with_gnu_ld=unknown
+ ;;
+ esac
+elif test "$with_gnu_ld" = yes; then
+ echo $ac_n "checking for GNU ld""... $ac_c" 1>&6
+echo "configure:1383: checking for GNU ld" >&5
+else
+ echo $ac_n "checking for non-GNU ld""... $ac_c" 1>&6
+echo "configure:1386: checking for non-GNU ld" >&5
+fi
+if eval "test \"`echo '$''{'ac_cv_path_LD'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
- case "$PERL" in
- /*)
- ac_cv_path_PERL="$PERL" # Let the user override the test with a path.
- ;;
- ?:/*)
- ac_cv_path_PERL="$PERL" # Let the user override the test with a dos path.
- ;;
- *)
- IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
- ac_dummy="$PATH"
- for ac_dir in $ac_dummy; do
+ if test -z "$LD"; then
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR-:}"
+ for ac_dir in $PATH; do
test -z "$ac_dir" && ac_dir=.
- if test -f $ac_dir/$ac_word; then
- ac_cv_path_PERL="$ac_dir/$ac_word"
- break
+ if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+ ac_cv_path_LD="$ac_dir/$ac_prog"
+ # Check to see if the program is GNU ld. I'd rather use --version,
+ # but apparently some GNU ld's only accept -v.
+ # Break only if it was the GNU/non-GNU ld that we prefer.
+ if "$ac_cv_path_LD" -v 2>&1 < /dev/null | egrep '(GNU|with BFD)' > /dev/null; then
+ test "$with_gnu_ld" != no && break
+ else
+ test "$with_gnu_ld" != yes && break
+ fi
fi
done
IFS="$ac_save_ifs"
- test -z "$ac_cv_path_PERL" && ac_cv_path_PERL="no"
- ;;
-esac
+else
+ ac_cv_path_LD="$LD" # Let the user override the test with a path.
fi
-PERL="$ac_cv_path_PERL"
-if test -n "$PERL"; then
- echo "$ac_t""$PERL" 1>&6
+fi
+
+LD="$ac_cv_path_LD"
+if test -n "$LD"; then
+ echo "$ac_t""$LD" 1>&6
else
echo "$ac_t""no" 1>&6
fi
+test -z "$LD" && { echo "configure: error: no acceptable ld found in \$PATH" 1>&2; exit 1; }
+echo $ac_n "checking if the linker ($LD) is GNU ld""... $ac_c" 1>&6
+echo "configure:1422: checking if the linker ($LD) is GNU ld" >&5
+if eval "test \"`echo '$''{'ac_cv_prog_gnu_ld'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ # I'd rather use --version here, but apparently some GNU ld's only accept -v.
+if $LD -v 2>&1 </dev/null | egrep '(GNU|with BFD)' 1>&5; then
+ ac_cv_prog_gnu_ld=yes
+else
+ ac_cv_prog_gnu_ld=no
+fi
+fi
+echo "$ac_t""$ac_cv_prog_gnu_ld" 1>&6
-echo $ac_n "checking for acos in -lm""... $ac_c" 1>&6
-echo "configure:1029: checking for acos in -lm" >&5
-ac_lib_var=`echo m'_'acos | sed 'y%./+-%__p_%'`
-if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
+
+echo $ac_n "checking for BSD-compatible nm""... $ac_c" 1>&6
+echo "configure:1438: checking for BSD-compatible nm" >&5
+if eval "test \"`echo '$''{'ac_cv_path_NM'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
- ac_save_LIBS="$LIBS"
-LIBS="-lm $LIBS"
-cat > conftest.$ac_ext <<EOF
-#line 1037 "configure"
+ if test -n "$NM"; then
+ # Let the user override the test.
+ ac_cv_path_NM="$NM"
+else
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR-:}"
+ for ac_dir in $PATH /usr/ccs/bin /usr/ucb /bin; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/nm || test -f $ac_dir/nm$ac_exeext ; then
+ # Check to see if the nm accepts a BSD-compat flag.
+ # Adding the `sed 1q' prevents false positives on HP-UX, which says:
+ # nm: unknown option "B" ignored
+ if ($ac_dir/nm -B /dev/null 2>&1 | sed '1q'; exit 0) | egrep /dev/null >/dev/null; then
+ ac_cv_path_NM="$ac_dir/nm -B"
+ break
+ elif ($ac_dir/nm -p /dev/null 2>&1 | sed '1q'; exit 0) | egrep /dev/null >/dev/null; then
+ ac_cv_path_NM="$ac_dir/nm -p"
+ break
+ else
+ ac_cv_path_NM=${ac_cv_path_NM="$ac_dir/nm"} # keep the first match, but
+ continue # so that we can try to find one that supports BSD flags
+ fi
+ fi
+ done
+ IFS="$ac_save_ifs"
+ test -z "$ac_cv_path_NM" && ac_cv_path_NM=nm
+fi
+fi
+
+NM="$ac_cv_path_NM"
+echo "$ac_t""$NM" 1>&6
+
+
+echo $ac_n "checking whether ln -s works""... $ac_c" 1>&6
+echo "configure:1475: checking whether ln -s works" >&5
+if eval "test \"`echo '$''{'ac_cv_prog_LN_S'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ rm -f conftestdata
+if ln -s X conftestdata 2>/dev/null
+then
+ rm -f conftestdata
+ ac_cv_prog_LN_S="ln -s"
+else
+ ac_cv_prog_LN_S=ln
+fi
+fi
+LN_S="$ac_cv_prog_LN_S"
+if test "$ac_cv_prog_LN_S" = "ln -s"; then
+ echo "$ac_t""yes" 1>&6
+else
+ echo "$ac_t""no" 1>&6
+fi
+
+
+# Check for any special flags to pass to ltconfig.
+libtool_flags="--cache-file=$cache_file"
+test "$enable_shared" = no && libtool_flags="$libtool_flags --disable-shared"
+test "$enable_static" = no && libtool_flags="$libtool_flags --disable-static"
+test "$enable_fast_install" = no && libtool_flags="$libtool_flags --disable-fast-install"
+test "$ac_cv_prog_gcc" = yes && libtool_flags="$libtool_flags --with-gcc"
+test "$ac_cv_prog_gnu_ld" = yes && libtool_flags="$libtool_flags --with-gnu-ld"
+
+
+# Check whether --enable-libtool-lock or --disable-libtool-lock was given.
+if test "${enable_libtool_lock+set}" = set; then
+ enableval="$enable_libtool_lock"
+ :
+fi
+
+test "x$enable_libtool_lock" = xno && libtool_flags="$libtool_flags --disable-lock"
+test x"$silent" = xyes && libtool_flags="$libtool_flags --silent"
+
+# Some flags need to be propagated to the compiler or linker for good
+# libtool support.
+case "$host" in
+*-*-irix6*)
+ # Find out which ABI we are using.
+ echo '#line 1519 "configure"' > conftest.$ac_ext
+ if { (eval echo configure:1520: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+ case "`/usr/bin/file conftest.o`" in
+ *32-bit*)
+ LD="${LD-ld} -32"
+ ;;
+ *N32*)
+ LD="${LD-ld} -n32"
+ ;;
+ *64-bit*)
+ LD="${LD-ld} -64"
+ ;;
+ esac
+ fi
+ rm -rf conftest*
+ ;;
+
+*-*-sco3.2v5*)
+ # On SCO OpenServer 5, we need -belf to get full-featured binaries.
+ SAVE_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -belf"
+ echo $ac_n "checking whether the C compiler needs -belf""... $ac_c" 1>&6
+echo "configure:1541: checking whether the C compiler needs -belf" >&5
+if eval "test \"`echo '$''{'lt_cv_cc_needs_belf'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ cat > conftest.$ac_ext <<EOF
+#line 1546 "configure"
#include "confdefs.h"
-/* Override any gcc2 internal prototype to avoid an error. */
-/* We use char because int might match the return type of a gcc2
- builtin and then its argument prototype would still apply. */
-char acos();
int main() {
-acos()
+
; return 0; }
EOF
-if { (eval echo configure:1048: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+if { (eval echo configure:1553: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
rm -rf conftest*
- eval "ac_cv_lib_$ac_lib_var=yes"
+ lt_cv_cc_needs_belf=yes
else
echo "configure: failed program was:" >&5
cat conftest.$ac_ext >&5
rm -rf conftest*
- eval "ac_cv_lib_$ac_lib_var=no"
+ lt_cv_cc_needs_belf=no
fi
rm -f conftest*
-LIBS="$ac_save_LIBS"
-
fi
-if eval "test \"`echo '$ac_cv_lib_'$ac_lib_var`\" = yes"; then
- echo "$ac_t""yes" 1>&6
- ac_tr_lib=HAVE_LIB`echo m | sed -e 's/[^a-zA-Z0-9_]/_/g' \
- -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/'`
- cat >> confdefs.h <<EOF
-#define $ac_tr_lib 1
+
+echo "$ac_t""$lt_cv_cc_needs_belf" 1>&6
+ if test x"$lt_cv_cc_needs_belf" != x"yes"; then
+ # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf
+ CFLAGS="$SAVE_CFLAGS"
+ fi
+ ;;
+
+
+esac
+
+
+# Save cache, so that ltconfig can load it
+cat > confcache <<\EOF
+# This file is a shell script that caches the results of configure
+# tests run on this system so they can be shared between configure
+# scripts and configure runs. It is not useful on other systems.
+# If it contains results you don't want to keep, you may remove or edit it.
+#
+# By default, configure uses ./config.cache as the cache file,
+# creating it if it does not exist already. You can give configure
+# the --cache-file=FILE option to use a different cache file; that is
+# what configure does when it calls configure scripts in
+# subdirectories, so they share the cache.
+# Giving --cache-file=/dev/null disables caching, for debugging configure.
+# config.status only pays attention to the cache file if you give it the
+# --recheck option to rerun configure.
+#
EOF
+# The following way of writing the cache mishandles newlines in values,
+# but we know of no workaround that is simple, portable, and efficient.
+# So, don't put newlines in cache variables' values.
+# Ultrix sh set writes to stderr and can't be redirected directly,
+# and sets the high bit in the cache file unless we assign to the vars.
+(set) 2>&1 |
+ case `(ac_space=' '; set | grep ac_space) 2>&1` in
+ *ac_space=\ *)
+ # `set' does not quote correctly, so add quotes (double-quote substitution
+ # turns \\\\ into \\, and sed turns \\ into \).
+ sed -n \
+ -e "s/'/'\\\\''/g" \
+ -e "s/^\\([a-zA-Z0-9_]*_cv_[a-zA-Z0-9_]*\\)=\\(.*\\)/\\1=\${\\1='\\2'}/p"
+ ;;
+ *)
+ # `set' quotes correctly as required by POSIX, so do not add quotes.
+ sed -n -e 's/^\([a-zA-Z0-9_]*_cv_[a-zA-Z0-9_]*\)=\(.*\)/\1=${\1=\2}/p'
+ ;;
+ esac >> confcache
+if cmp -s $cache_file confcache; then
+ :
+else
+ if test -w $cache_file; then
+ echo "updating cache $cache_file"
+ cat confcache > $cache_file
+ else
+ echo "not updating unwritable cache $cache_file"
+ fi
+fi
+rm -f confcache
- LIBS="-lm $LIBS"
+# Actually configure libtool. ac_aux_dir is where install-sh is found.
+CC="$CC" CFLAGS="$CFLAGS" CPPFLAGS="$CPPFLAGS" \
+LD="$LD" LDFLAGS="$LDFLAGS" LIBS="$LIBS" \
+LN_S="$LN_S" NM="$NM" RANLIB="$RANLIB" \
+DLLTOOL="$DLLTOOL" AS="$AS" OBJDUMP="$OBJDUMP" \
+${CONFIG_SHELL-/bin/sh} $ac_aux_dir/ltconfig --no-reexec \
+$libtool_flags --no-verify $ac_aux_dir/ltmain.sh $host \
+|| { echo "configure: error: libtool configure failed" 1>&2; exit 1; }
+
+# Reload cache, that may have been modified by ltconfig
+if test -r "$cache_file"; then
+ echo "loading cache $cache_file"
+ . $cache_file
else
- echo "$ac_t""no" 1>&6
+ echo "creating cache $cache_file"
+ > $cache_file
fi
+# This can be used to rebuild libtool when needed
+LIBTOOL_DEPS="$ac_aux_dir/ltconfig $ac_aux_dir/ltmain.sh"
+
+# Always use our own libtool.
+LIBTOOL='$(SHELL) $(top_builddir)/libtool'
+
+# Redirect the config.log output again, so that the ltconfig log is not
+# clobbered by the next message.
+exec 5>>./config.log
+
+
echo $ac_n "checking for ANSI C header files""... $ac_c" 1>&6
-echo "configure:1077: checking for ANSI C header files" >&5
+echo "configure:1656: checking for ANSI C header files" >&5
if eval "test \"`echo '$''{'ac_cv_header_stdc'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
cat > conftest.$ac_ext <<EOF
-#line 1082 "configure"
+#line 1661 "configure"
#include "confdefs.h"
#include <stdlib.h>
#include <stdarg.h>
@@ -1086,7 +1665,7 @@
#include <float.h>
EOF
ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
-{ (eval echo configure:1090: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+{ (eval echo configure:1669: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
if test -z "$ac_err"; then
rm -rf conftest*
@@ -1103,7 +1682,7 @@
if test $ac_cv_header_stdc = yes; then
# SunOS 4.x string.h does not declare mem*, contrary to ANSI.
cat > conftest.$ac_ext <<EOF
-#line 1107 "configure"
+#line 1686 "configure"
#include "confdefs.h"
#include <string.h>
EOF
@@ -1121,7 +1700,7 @@
if test $ac_cv_header_stdc = yes; then
# ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
cat > conftest.$ac_ext <<EOF
-#line 1125 "configure"
+#line 1704 "configure"
#include "confdefs.h"
#include <stdlib.h>
EOF
@@ -1142,7 +1721,7 @@
:
else
cat > conftest.$ac_ext <<EOF
-#line 1146 "configure"
+#line 1725 "configure"
#include "confdefs.h"
#include <ctype.h>
#define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
@@ -1153,7 +1732,7 @@
exit (0); }
EOF
-if { (eval echo configure:1157: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext} && (./conftest; exit) 2>/dev/null
+if { (eval echo configure:1736: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext} && (./conftest; exit) 2>/dev/null
then
:
else
@@ -1180,17 +1759,17 @@
do
ac_safe=`echo "$ac_hdr" | sed 'y%./+-%__p_%'`
echo $ac_n "checking for $ac_hdr""... $ac_c" 1>&6
-echo "configure:1184: checking for $ac_hdr" >&5
+echo "configure:1763: checking for $ac_hdr" >&5
if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
cat > conftest.$ac_ext <<EOF
-#line 1189 "configure"
+#line 1768 "configure"
#include "confdefs.h"
#include <$ac_hdr>
EOF
ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
-{ (eval echo configure:1194: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+{ (eval echo configure:1773: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
if test -z "$ac_err"; then
rm -rf conftest*
@@ -1218,12 +1797,12 @@
echo $ac_n "checking for working const""... $ac_c" 1>&6
-echo "configure:1222: checking for working const" >&5
+echo "configure:1801: checking for working const" >&5
if eval "test \"`echo '$''{'ac_cv_c_const'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
cat > conftest.$ac_ext <<EOF
-#line 1227 "configure"
+#line 1806 "configure"
#include "confdefs.h"
int main() {
@@ -1272,7 +1851,7 @@
; return 0; }
EOF
-if { (eval echo configure:1276: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+if { (eval echo configure:1855: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
rm -rf conftest*
ac_cv_c_const=yes
else
@@ -1293,12 +1872,12 @@
fi
echo $ac_n "checking whether time.h and sys/time.h may both be included""... $ac_c" 1>&6
-echo "configure:1297: checking whether time.h and sys/time.h may both be included" >&5
+echo "configure:1876: checking whether time.h and sys/time.h may both be included" >&5
if eval "test \"`echo '$''{'ac_cv_header_time'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
cat > conftest.$ac_ext <<EOF
-#line 1302 "configure"
+#line 1881 "configure"
#include "confdefs.h"
#include <sys/types.h>
#include <sys/time.h>
@@ -1307,7 +1886,7 @@
struct tm *tp;
; return 0; }
EOF
-if { (eval echo configure:1311: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+if { (eval echo configure:1890: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
rm -rf conftest*
ac_cv_header_time=yes
else
@@ -1328,59 +1907,106 @@
fi
echo $ac_n "checking whether struct tm is in sys/time.h or time.h""... $ac_c" 1>&6
-echo "configure:1332: checking whether struct tm is in sys/time.h or time.h" >&5
+echo "configure:1911: checking whether struct tm is in sys/time.h or time.h" >&5
if eval "test \"`echo '$''{'ac_cv_struct_tm'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
cat > conftest.$ac_ext <<EOF
-#line 1337 "configure"
+#line 1916 "configure"
+#include "confdefs.h"
+#include <sys/types.h>
+#include <time.h>
+int main() {
+struct tm *tp; tp->tm_sec;
+; return 0; }
+EOF
+if { (eval echo configure:1924: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+ rm -rf conftest*
+ ac_cv_struct_tm=time.h
+else
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -rf conftest*
+ ac_cv_struct_tm=sys/time.h
+fi
+rm -f conftest*
+fi
+
+echo "$ac_t""$ac_cv_struct_tm" 1>&6
+if test $ac_cv_struct_tm = sys/time.h; then
+ cat >> confdefs.h <<\EOF
+#define TM_IN_SYS_TIME 1
+EOF
+
+fi
+
+
+echo $ac_n "checking for acos in -lm""... $ac_c" 1>&6
+echo "configure:1946: checking for acos in -lm" >&5
+ac_lib_var=`echo m'_'acos | sed 'y%./+-%__p_%'`
+if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ ac_save_LIBS="$LIBS"
+LIBS="-lm $LIBS"
+cat > conftest.$ac_ext <<EOF
+#line 1954 "configure"
#include "confdefs.h"
-#include <sys/types.h>
-#include <time.h>
+/* Override any gcc2 internal prototype to avoid an error. */
+/* We use char because int might match the return type of a gcc2
+ builtin and then its argument prototype would still apply. */
+char acos();
+
int main() {
-struct tm *tp; tp->tm_sec;
+acos()
; return 0; }
EOF
-if { (eval echo configure:1345: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+if { (eval echo configure:1965: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
rm -rf conftest*
- ac_cv_struct_tm=time.h
+ eval "ac_cv_lib_$ac_lib_var=yes"
else
echo "configure: failed program was:" >&5
cat conftest.$ac_ext >&5
rm -rf conftest*
- ac_cv_struct_tm=sys/time.h
+ eval "ac_cv_lib_$ac_lib_var=no"
fi
rm -f conftest*
-fi
+LIBS="$ac_save_LIBS"
-echo "$ac_t""$ac_cv_struct_tm" 1>&6
-if test $ac_cv_struct_tm = sys/time.h; then
- cat >> confdefs.h <<\EOF
-#define TM_IN_SYS_TIME 1
+fi
+if eval "test \"`echo '$ac_cv_lib_'$ac_lib_var`\" = yes"; then
+ echo "$ac_t""yes" 1>&6
+ ac_tr_lib=HAVE_LIB`echo m | sed -e 's/[^a-zA-Z0-9_]/_/g' \
+ -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/'`
+ cat >> confdefs.h <<EOF
+#define $ac_tr_lib 1
EOF
+ LIBS="-lm $LIBS"
+
+else
+ echo "$ac_t""no" 1>&6
fi
if test ${CC-gcc} = gcc; then
- rd_cv_prog_hpcc=no
-oCFLAGS_EXTRA=$CFLAGS_EXTRA
-CFLAGS_EXTRA="$CFLAGS_EXTRA -Wall -pedantic -fPIC"
-echo $ac_n "checking if we can use GCC-specific compiler options""... $ac_c" 1>&6
-echo "configure:1371: checking if we can use GCC-specific compiler options" >&5
+ oCFLAGS=$CFLAGS
+ CFLAGS="$CFLAGS -Wall -pedantic"
+ echo $ac_n "checking if we can use GCC-specific compiler options""... $ac_c" 1>&6
+echo "configure:1997: checking if we can use GCC-specific compiler options" >&5
if eval "test \"`echo '$''{'rd_cv_gcc_opt'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
-cat > conftest.$ac_ext <<EOF
-#line 1377 "configure"
+ cat > conftest.$ac_ext <<EOF
+#line 2003 "configure"
#include "confdefs.h"
int main() {
return 0
; return 0; }
EOF
-if { (eval echo configure:1384: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
+if { (eval echo configure:2010: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then
rm -rf conftest*
rd_cv_gcc_opt=yes
else
@@ -1390,47 +2016,22 @@
rd_cv_gcc_opt=no
fi
rm -f conftest*
-
+
fi
echo "$ac_t""$rd_cv_gcc_opt" 1>&6
-if test $rd_cv_gcc_opt = no; then
- CFLAGS_EXTRA=$oCFLAGS_EXTRA
-fi
-
-else
-echo $ac_n "checking if we should use HP compiler options""... $ac_c" 1>&6
-echo "configure:1404: checking if we should use HP compiler options" >&5
-if eval "test \"`echo '$''{'rd_cv_prog_hpcc'+set}'`\" = set"; then
- echo $ac_n "(cached) $ac_c" 1>&6
-else
-
-cat > conftest.c <<EOF
-#ifdef _HPUX_SOURCE
- yes;
-#endif
-EOF
-if { ac_try='${CC-cc} -Ae -z -E conftest.c'; { (eval echo configure:1414: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }; } | egrep yes >/dev/null 2>&1
-then
- rd_cv_prog_hpcc=yes
-else
- rd_cv_prog_hpcc=no
-fi
-fi
-
-echo "$ac_t""$rd_cv_prog_hpcc" 1>&6
-fi
-if test $rd_cv_prog_hpcc = yes; then
- CFLAGS_EXTRA="$CFLAGS_EXTRA -Ae -z"
+ if test $rd_cv_gcc_opt = no; then
+ CFLAGS=$oCFLAGS
+ fi
fi
echo $ac_n "checking for strftime""... $ac_c" 1>&6
-echo "configure:1429: checking for strftime" >&5
+echo "configure:2030: checking for strftime" >&5
if eval "test \"`echo '$''{'ac_cv_func_strftime'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
cat > conftest.$ac_ext <<EOF
-#line 1434 "configure"
+#line 2035 "configure"
#include "confdefs.h"
/* System header to define __stub macros and hopefully few prototypes,
which can conflict with char strftime(); below. */
@@ -1453,7 +2054,7 @@
; return 0; }
EOF
-if { (eval echo configure:1457: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+if { (eval echo configure:2058: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
rm -rf conftest*
eval "ac_cv_func_strftime=yes"
else
@@ -1475,7 +2076,7 @@
echo "$ac_t""no" 1>&6
# strftime is in -lintl on SCO UNIX.
echo $ac_n "checking for strftime in -lintl""... $ac_c" 1>&6
-echo "configure:1479: checking for strftime in -lintl" >&5
+echo "configure:2080: checking for strftime in -lintl" >&5
ac_lib_var=`echo intl'_'strftime | sed 'y%./+-%__p_%'`
if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
@@ -1483,7 +2084,7 @@
ac_save_LIBS="$LIBS"
LIBS="-lintl $LIBS"
cat > conftest.$ac_ext <<EOF
-#line 1487 "configure"
+#line 2088 "configure"
#include "confdefs.h"
/* Override any gcc2 internal prototype to avoid an error. */
/* We use char because int might match the return type of a gcc2
@@ -1494,7 +2095,7 @@
strftime()
; return 0; }
EOF
-if { (eval echo configure:1498: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+if { (eval echo configure:2099: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
rm -rf conftest*
eval "ac_cv_lib_$ac_lib_var=yes"
else
@@ -1521,12 +2122,12 @@
fi
echo $ac_n "checking for vprintf""... $ac_c" 1>&6
-echo "configure:1525: checking for vprintf" >&5
+echo "configure:2126: checking for vprintf" >&5
if eval "test \"`echo '$''{'ac_cv_func_vprintf'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
cat > conftest.$ac_ext <<EOF
-#line 1530 "configure"
+#line 2131 "configure"
#include "confdefs.h"
/* System header to define __stub macros and hopefully few prototypes,
which can conflict with char vprintf(); below. */
@@ -1549,7 +2150,7 @@
; return 0; }
EOF
-if { (eval echo configure:1553: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+if { (eval echo configure:2154: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
rm -rf conftest*
eval "ac_cv_func_vprintf=yes"
else
@@ -1573,12 +2174,12 @@
if test "$ac_cv_func_vprintf" != yes; then
echo $ac_n "checking for _doprnt""... $ac_c" 1>&6
-echo "configure:1577: checking for _doprnt" >&5
+echo "configure:2178: checking for _doprnt" >&5
if eval "test \"`echo '$''{'ac_cv_func__doprnt'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
cat > conftest.$ac_ext <<EOF
-#line 1582 "configure"
+#line 2183 "configure"
#include "confdefs.h"
/* System header to define __stub macros and hopefully few prototypes,
which can conflict with char _doprnt(); below. */
@@ -1601,7 +2202,7 @@
; return 0; }
EOF
-if { (eval echo configure:1605: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+if { (eval echo configure:2206: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
rm -rf conftest*
eval "ac_cv_func__doprnt=yes"
else
@@ -1626,15 +2227,15 @@
fi
-for ac_func in mktime getrusage gettimeofday
+for ac_func in snprintf fpclass isnan finite isinf memmove strchr mktime getrusage gettimeofday
do
echo $ac_n "checking for $ac_func""... $ac_c" 1>&6
-echo "configure:1633: checking for $ac_func" >&5
+echo "configure:2234: checking for $ac_func" >&5
if eval "test \"`echo '$''{'ac_cv_func_$ac_func'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
cat > conftest.$ac_ext <<EOF
-#line 1638 "configure"
+#line 2239 "configure"
#include "confdefs.h"
/* System header to define __stub macros and hopefully few prototypes,
which can conflict with char $ac_func(); below. */
@@ -1657,7 +2258,7 @@
; return 0; }
EOF
-if { (eval echo configure:1661: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
+if { (eval echo configure:2262: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
rm -rf conftest*
eval "ac_cv_func_$ac_func=yes"
else
@@ -1682,9 +2283,57 @@
done
-oCFLAGS_EXTRA=$CFLAGS_EXTRA
-echo $ac_n "checking if the compiler does proper IEEE math""... $ac_c" 1>&6
-echo "configure:1688: checking if the compiler does proper IEEE math" >&5
+
+echo $ac_n "checking if realloc can deal with NULL""... $ac_c" 1>&6
+echo "configure:2289: checking if realloc can deal with NULL" >&5
+if eval "test \"`echo '$''{'rd_cv_null_realloc'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ if test "$cross_compiling" = yes; then
+ :
+else
+ cat > conftest.$ac_ext <<EOF
+#line 2297 "configure"
+#include "confdefs.h"
+#include <stdlib.h>
+ int main(void){
+ char *x = NULL;
+ x = realloc (x,10);
+ if (x==NULL) return 1;
+ return 0;
+ }
+EOF
+if { (eval echo configure:2307: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext} && (./conftest; exit) 2>/dev/null
+then
+ rd_cv_null_realloc=yes
+else
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -fr conftest*
+ rd_cv_null_realloc=nope
+fi
+rm -fr conftest*
+fi
+
+fi
+
+echo "$ac_t""$rd_cv_null_realloc" 1>&6
+
+if test x"$rd_cv_null_realloc" = xnope; then
+cat >> confdefs.h <<\EOF
+#define NO_NULL_REALLOC 1
+EOF
+
+fi
+
+
+
+
+oCFLAGS=$CFLAGS
+unset CFLAGS
+
+echo $ac_n "checking if IEEE math works out of the box""... $ac_c" 1>&6
+echo "configure:2337: checking if IEEE math works out of the box" >&5
if eval "test \"`echo '$''{'rd_cv_ieee_works'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
@@ -1692,25 +2341,62 @@
:
else
cat > conftest.$ac_ext <<EOF
-#line 1696 "configure"
+#line 2345 "configure"
#include "confdefs.h"
+
+
+#if (! defined(HAVE_FINITE) && defined(HAVE_ISNAN) && defined(HAVE_ISINF))
+# define HAVE_FINITE 1
+# define finite(a) (! isnan(a) && ! isinf(a))
+#endif
+
+#if (! defined(HAVE_ISINF) && defined(HAVE_FPCLASS))
+# define HAVE_ISINF 1
+# include <ieeefp.h>
+# define isinf(a) (fpclass(a) == FP_NINF || fpclass(a) == FP_PINF)
+#endif
+
+#include <stdio.h>
int main(void){
- double a=0.0/0.0,b=0.0,c;
- c=a/b;if (c==a) c=a;return isnan(a/c)?0:1;
- }
+ double nan,inf,c,d;
+ ;
+ /* some math to see if we get a segfault; */
+ nan=0.0/0.0;
+ inf=1.0/0.0;
+ c = 1.0;
+ c = c / 0.0; /* try getting fpe */
+ c = inf + nan;
+ c = inf / nan;
+ if (! isnan(nan)) {printf ("isnan(NaN) ... "); return 1;}
+ if (nan == nan) {printf ("nan != nan ..."); return 1;}
+ if (! isinf(inf)) {printf ("isinf(oo) ... "); return 1;}
+ if (! isinf(-inf)) {printf ("isinf(-oo) ... "); return 1;}
+ if (! inf > 0) {printf ("inf > 0 ... "); return 1;}
+ if (! -inf < 0) {printf ("inf < 0 ... "); return 1;}
+ return 0;
+}
EOF
-if { (eval echo configure:1703: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext} && (./conftest; exit) 2>/dev/null
+if { (eval echo configure:2380: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext} && (./conftest; exit) 2>/dev/null
then
rd_cv_ieee_works=yes
else
echo "configure: failed program was:" >&5
cat conftest.$ac_ext >&5
rm -fr conftest*
-
-echo "$ac_t""no it does not" 1>&6
-CFLAGS_EXTRA="$CFLAGS_EXTRA -ieee"
-echo $ac_n "checking if the compiler does proper IEEE math with -ieee""... $ac_c" 1>&6
-echo "configure:1714: checking if the compiler does proper IEEE math with -ieee" >&5
+ rd_cv_ieee_works=no
+fi
+rm -fr conftest*
+fi
+
+fi
+
+echo "$ac_t""$rd_cv_ieee_works" 1>&6
+
+if test "$rd_cv_ieee_works" != yes ; then
+
+CFLAGS=-ieee
+echo $ac_n "checking if IEEE math works with the -ieee switch""... $ac_c" 1>&6
+echo "configure:2400: checking if IEEE math works with the -ieee switch" >&5
if eval "test \"`echo '$''{'rd_cv_ieee_switch'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
@@ -1718,29 +2404,49 @@
:
else
cat > conftest.$ac_ext <<EOF
-#line 1722 "configure"
+#line 2408 "configure"
#include "confdefs.h"
+
+
+#if (! defined(HAVE_FINITE) && defined(HAVE_ISNAN) && defined(HAVE_ISINF))
+# define HAVE_FINITE 1
+# define finite(a) (! isnan(a) && ! isinf(a))
+#endif
+
+#if (! defined(HAVE_ISINF) && defined(HAVE_FPCLASS))
+# define HAVE_ISINF 1
+# include <ieeefp.h>
+# define isinf(a) (fpclass(a) == FP_NINF || fpclass(a) == FP_PINF)
+#endif
+
+#include <stdio.h>
int main(void){
- double a=0.0/0.0,b=0.0,c;
- c=a/b;if (c==a) c=a;return isnan(a/c)?0:1;
- }
+ double nan,inf,c,d;
+ ;
+ /* some math to see if we get a segfault; */
+ nan=0.0/0.0;
+ inf=1.0/0.0;
+ c = 1.0;
+ c = c / 0.0; /* try getting fpe */
+ c = inf + nan;
+ c = inf / nan;
+ if (! isnan(nan)) {printf ("isnan(NaN) ... "); return 1;}
+ if (nan == nan) {printf ("nan != nan ..."); return 1;}
+ if (! isinf(inf)) {printf ("isinf(oo) ... "); return 1;}
+ if (! isinf(-inf)) {printf ("isinf(-oo) ... "); return 1;}
+ if (! inf > 0) {printf ("inf > 0 ... "); return 1;}
+ if (! -inf < 0) {printf ("inf < 0 ... "); return 1;}
+ return 0;
+}
EOF
-if { (eval echo configure:1729: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext} && (./conftest; exit) 2>/dev/null
+if { (eval echo configure:2443: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext} && (./conftest; exit) 2>/dev/null
then
rd_cv_ieee_switch=yes
else
echo "configure: failed program was:" >&5
cat conftest.$ac_ext >&5
rm -fr conftest*
- rd_cv_ieee_switch=no;
-echo "$ac_t""nope" 1>&6
-echo "--------------------------------------------------------------"
-echo "Your Compiler does not do propper IEEE math ... "
-echo "Please find out how to make IEEE math work with your Compiler"
-echo "And let me know (oetiker at ee.ethz.ch)"
-echo ""
-exit 1
-
+ rd_cv_ieee_switch=no
fi
rm -fr conftest*
fi
@@ -1749,20 +2455,302 @@
echo "$ac_t""$rd_cv_ieee_switch" 1>&6
+ if test "$rd_cv_ieee_switch" != yes ; then
+
+CFLAGS=-mieee
+echo $ac_n "checking if IEEE math works with the -mieee switch""... $ac_c" 1>&6
+echo "configure:2463: checking if IEEE math works with the -mieee switch" >&5
+if eval "test \"`echo '$''{'rd_cv_ieee_mswitch'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ if test "$cross_compiling" = yes; then
+ :
+else
+ cat > conftest.$ac_ext <<EOF
+#line 2471 "configure"
+#include "confdefs.h"
+
+
+#if (! defined(HAVE_FINITE) && defined(HAVE_ISNAN) && defined(HAVE_ISINF))
+# define HAVE_FINITE 1
+# define finite(a) (! isnan(a) && ! isinf(a))
+#endif
+
+#if (! defined(HAVE_ISINF) && defined(HAVE_FPCLASS))
+# define HAVE_ISINF 1
+# include <ieeefp.h>
+# define isinf(a) (fpclass(a) == FP_NINF || fpclass(a) == FP_PINF)
+#endif
+
+#include <stdio.h>
+int main(void){
+ double nan,inf,c,d;
+ ;
+ /* some math to see if we get a segfault; */
+ nan=0.0/0.0;
+ inf=1.0/0.0;
+ c = 1.0;
+ c = c / 0.0; /* try getting fpe */
+ c = inf + nan;
+ c = inf / nan;
+ if (! isnan(nan)) {printf ("isnan(NaN) ... "); return 1;}
+ if (nan == nan) {printf ("nan != nan ..."); return 1;}
+ if (! isinf(inf)) {printf ("isinf(oo) ... "); return 1;}
+ if (! isinf(-inf)) {printf ("isinf(-oo) ... "); return 1;}
+ if (! inf > 0) {printf ("inf > 0 ... "); return 1;}
+ if (! -inf < 0) {printf ("inf < 0 ... "); return 1;}
+ return 0;
+}
+EOF
+if { (eval echo configure:2506: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext} && (./conftest; exit) 2>/dev/null
+then
+ rd_cv_ieee_mswitch=yes
+else
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -fr conftest*
+ rd_cv_ieee_mswitch=no
fi
rm -fr conftest*
fi
fi
-echo "$ac_t""$rd_cv_ieee_works" 1>&6
+echo "$ac_t""$rd_cv_ieee_mswitch" 1>&6
+
+ if test "$rd_cv_ieee_mswitch" != yes ; then
+
+CFLAGS="-q float=rndsngl"
+echo $ac_n "checking if IEEE math works with the -q float=rndsngl switch""... $ac_c" 1>&6
+echo "configure:2526: checking if IEEE math works with the -q float=rndsngl switch" >&5
+if eval "test \"`echo '$''{'rd_cv_ieee_qswitch'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ if test "$cross_compiling" = yes; then
+ :
+else
+ cat > conftest.$ac_ext <<EOF
+#line 2534 "configure"
+#include "confdefs.h"
+
+
+#if (! defined(HAVE_FINITE) && defined(HAVE_ISNAN) && defined(HAVE_ISINF))
+# define HAVE_FINITE 1
+# define finite(a) (! isnan(a) && ! isinf(a))
+#endif
+
+#if (! defined(HAVE_ISINF) && defined(HAVE_FPCLASS))
+# define HAVE_ISINF 1
+# include <ieeefp.h>
+# define isinf(a) (fpclass(a) == FP_NINF || fpclass(a) == FP_PINF)
+#endif
+
+#include <stdio.h>
+int main(void){
+ double nan,inf,c,d;
+ ;
+ /* some math to see if we get a segfault; */
+ nan=0.0/0.0;
+ inf=1.0/0.0;
+ c = 1.0;
+ c = c / 0.0; /* try getting fpe */
+ c = inf + nan;
+ c = inf / nan;
+ if (! isnan(nan)) {printf ("isnan(NaN) ... "); return 1;}
+ if (nan == nan) {printf ("nan != nan ..."); return 1;}
+ if (! isinf(inf)) {printf ("isinf(oo) ... "); return 1;}
+ if (! isinf(-inf)) {printf ("isinf(-oo) ... "); return 1;}
+ if (! inf > 0) {printf ("inf > 0 ... "); return 1;}
+ if (! -inf < 0) {printf ("inf < 0 ... "); return 1;}
+ return 0;
+}
+EOF
+if { (eval echo configure:2569: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext} && (./conftest; exit) 2>/dev/null
+then
+ rd_cv_ieee_qswitch=yes
+else
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -fr conftest*
+ rd_cv_ieee_qswitch=no
+fi
+rm -fr conftest*
+fi
+
+fi
+
+echo "$ac_t""$rd_cv_ieee_qswitch" 1>&6
+
+ if test "$rd_cv_ieee_qswitch" != yes ; then
+
+unset CFLAGS
+
+echo $ac_n "checking if IEEE math works with fpsetmask(0)""... $ac_c" 1>&6
+echo "configure:2590: checking if IEEE math works with fpsetmask(0)" >&5
+if eval "test \"`echo '$''{'rd_cv_ieee_mask'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ if test "$cross_compiling" = yes; then
+ :
+else
+ cat > conftest.$ac_ext <<EOF
+#line 2598 "configure"
+#include "confdefs.h"
+#include <floatingpoint.h>
+
+#if (! defined(HAVE_FINITE) && defined(HAVE_ISNAN) && defined(HAVE_ISINF))
+# define HAVE_FINITE 1
+# define finite(a) (! isnan(a) && ! isinf(a))
+#endif
+
+#if (! defined(HAVE_ISINF) && defined(HAVE_FPCLASS))
+# define HAVE_ISINF 1
+# include <ieeefp.h>
+# define isinf(a) (fpclass(a) == FP_NINF || fpclass(a) == FP_PINF)
+#endif
+
+#include <stdio.h>
+int main(void){
+ double nan,inf,c,d;
+ fpsetmask(0);
+ /* some math to see if we get a segfault; */
+ nan=0.0/0.0;
+ inf=1.0/0.0;
+ c = 1.0;
+ c = c / 0.0; /* try getting fpe */
+ c = inf + nan;
+ c = inf / nan;
+ if (! isnan(nan)) {printf ("isnan(NaN) ... "); return 1;}
+ if (nan == nan) {printf ("nan != nan ..."); return 1;}
+ if (! isinf(inf)) {printf ("isinf(oo) ... "); return 1;}
+ if (! isinf(-inf)) {printf ("isinf(-oo) ... "); return 1;}
+ if (! inf > 0) {printf ("inf > 0 ... "); return 1;}
+ if (! -inf < 0) {printf ("inf < 0 ... "); return 1;}
+ return 0;
+}
+EOF
+if { (eval echo configure:2633: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext} && (./conftest; exit) 2>/dev/null
+then
+ rd_cv_ieee_mask=yes
+else
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -fr conftest*
+ rd_cv_ieee_mask=no
+fi
+rm -fr conftest*
+fi
+
+fi
+
+echo "$ac_t""$rd_cv_ieee_mask" 1>&6
+
+ if test "$rd_cv_ieee_mask" != yes ; then
+
+echo $ac_n "checking if IEEE math works with signal(SIGFPE,SIG_IGN)""... $ac_c" 1>&6
+echo "configure:2652: checking if IEEE math works with signal(SIGFPE,SIG_IGN)" >&5
+if eval "test \"`echo '$''{'rd_cv_ieee_sigfpe'+set}'`\" = set"; then
+ echo $ac_n "(cached) $ac_c" 1>&6
+else
+ if test "$cross_compiling" = yes; then
+ :
+else
+ cat > conftest.$ac_ext <<EOF
+#line 2660 "configure"
+#include "confdefs.h"
+#include <signal.h>
+
+#if (! defined(HAVE_FINITE) && defined(HAVE_ISNAN) && defined(HAVE_ISINF))
+# define HAVE_FINITE 1
+# define finite(a) (! isnan(a) && ! isinf(a))
+#endif
+
+#if (! defined(HAVE_ISINF) && defined(HAVE_FPCLASS))
+# define HAVE_ISINF 1
+# include <ieeefp.h>
+# define isinf(a) (fpclass(a) == FP_NINF || fpclass(a) == FP_PINF)
+#endif
+
+#include <stdio.h>
+int main(void){
+ double nan,inf,c,d;
+ signal(SIGFPE,SIG_IGN);
+ /* some math to see if we get a segfault; */
+ nan=0.0/0.0;
+ inf=1.0/0.0;
+ c = 1.0;
+ c = c / 0.0; /* try getting fpe */
+ c = inf + nan;
+ c = inf / nan;
+ if (! isnan(nan)) {printf ("isnan(NaN) ... "); return 1;}
+ if (nan == nan) {printf ("nan != nan ..."); return 1;}
+ if (! isinf(inf)) {printf ("isinf(oo) ... "); return 1;}
+ if (! isinf(-inf)) {printf ("isinf(-oo) ... "); return 1;}
+ if (! inf > 0) {printf ("inf > 0 ... "); return 1;}
+ if (! -inf < 0) {printf ("inf < 0 ... "); return 1;}
+ return 0;
+}
+EOF
+if { (eval echo configure:2695: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext} && (./conftest; exit) 2>/dev/null
+then
+ rd_cv_ieee_sigfpe=yes
+else
+ echo "configure: failed program was:" >&5
+ cat conftest.$ac_ext >&5
+ rm -fr conftest*
+ rd_cv_ieee_sigfpe=no
+fi
+rm -fr conftest*
+fi
+
+fi
+
+echo "$ac_t""$rd_cv_ieee_sigfpe" 1>&6
-CFLAGS_EXTRA=$oCFLAGS_EXTRA
+ if test "$rd_cv_ieee_sigfpe" != yes ; then
+
+
+echo "--------------------------------------------------------------"
+echo "Your Compiler does not do propper IEEE math ... "
+echo "Please find out how to make IEEE math work with your Compiler"
+echo "and let me know (oetiker at ee.ethz.ch)"
+echo "Check config.log to see what went wrong ..."
+echo ""
+exit 1
+ fi
+ fi
+ fi
+ fi
+ fi
+fi
+
+CFLAGS=$oCFLAGS
+
+if test x$rd_cv_ieee_sigfpe = xyes; then
+ cat >> confdefs.h <<\EOF
+#define MUST_DISABLE_SIGFPE 1
+EOF
+
+fi
+
+if test x$rd_cv_ieee_mask = xyes; then
+ cat >> confdefs.h <<\EOF
+#define MUST_DISABLE_FPMASK 1
+EOF
+
+fi
if test x$rd_cv_ieee_switch = xyes; then
- CFLAGS_EXTRA="$CFLAGS_EXTRA -ieee"
+ CFLAGS="$CFLAGS -ieee"
+fi
+
+if test x$rd_cv_ieee_mswitch = xyes; then
+ CFLAGS="$CFLAGS -mieee"
fi
+if test x$rd_cv_ieee_qswitch = xyes; then
+ CFLAGS="$CFLAGS -q float=rndsngl"
+fi
@@ -1867,7 +2855,26 @@
ac_given_srcdir=$srcdir
ac_given_INSTALL="$INSTALL"
-trap 'rm -fr `echo "Makefile src/Makefile gd1.2/Makefile doc/GNUmakefile perl-shared/examples/shared-demo.pl perl-piped/examples/piped-demo.pl config.h" | sed "s/:[^ ]*//g"` conftest*; exit 1' 1 2 15
+trap 'rm -fr `echo "cgilib-0.4/Makefile \
+ config/Makefile \
+ examples/shared-demo.pl \
+ examples/piped-demo.pl \
+ examples/stripes.pl \
+ examples/bigtops.pl \
+ examples/minmax.pl \
+ examples/cgi-demo.cgi \
+ examples/Makefile \
+ contrib/Makefile \
+ contrib/trytime/Makefile \
+ contrib/log2rrd/Makefile \
+ contrib/log2rrd/log2rrd.pl \
+ contrib/rrd-file-icon/Makefile \
+ doc/Makefile \
+ gd1.3/Makefile \
+ libpng-1.0.3/Makefile \
+ zlib-1.1.3/Makefile \
+ src/Makefile \
+ Makefile config/config.h" | sed "s/:[^ ]*//g"` conftest*; exit 1' 1 2 15
EOF
cat >> $CONFIG_STATUS <<EOF
@@ -1899,16 +2906,45 @@
s%@oldincludedir@%$oldincludedir%g
s%@infodir@%$infodir%g
s%@mandir@%$mandir%g
-s%@CC@%$CC%g
-s%@CPP@%$CPP%g
+s%@host@%$host%g
+s%@host_alias@%$host_alias%g
+s%@host_cpu@%$host_cpu%g
+s%@host_vendor@%$host_vendor%g
+s%@host_os@%$host_os%g
+s%@target@%$target%g
+s%@target_alias@%$target_alias%g
+s%@target_cpu@%$target_cpu%g
+s%@target_vendor@%$target_vendor%g
+s%@target_os@%$target_os%g
+s%@build@%$build%g
+s%@build_alias@%$build_alias%g
+s%@build_cpu@%$build_cpu%g
+s%@build_vendor@%$build_vendor%g
+s%@build_os@%$build_os%g
s%@INSTALL_PROGRAM@%$INSTALL_PROGRAM%g
s%@INSTALL_SCRIPT@%$INSTALL_SCRIPT%g
s%@INSTALL_DATA@%$INSTALL_DATA%g
-s%@LN_S@%$LN_S%g
+s%@PACKAGE@%$PACKAGE%g
+s%@VERSION@%$VERSION%g
+s%@ACLOCAL@%$ACLOCAL%g
+s%@AUTOCONF@%$AUTOCONF%g
+s%@AUTOMAKE@%$AUTOMAKE%g
+s%@AUTOHEADER@%$AUTOHEADER%g
+s%@MAKEINFO@%$MAKEINFO%g
s%@SET_MAKE@%$SET_MAKE%g
-s%@RANLIB@%$RANLIB%g
+s%@CGI_LIB_DIR@%$CGI_LIB_DIR%g
+s%@GD_LIB_DIR@%$GD_LIB_DIR%g
+s%@PNG_LIB_DIR@%$PNG_LIB_DIR%g
+s%@ZLIB_LIB_DIR@%$ZLIB_LIB_DIR%g
s%@PERL@%$PERL%g
-s%@CFLAGS_EXTRA@%$CFLAGS_EXTRA%g
+s%@COMP_PERL@%$COMP_PERL%g
+s%@CC@%$CC%g
+s%@CPP@%$CPP%g
+s%@RANLIB@%$RANLIB%g
+s%@LD@%$LD%g
+s%@NM@%$NM%g
+s%@LN_S@%$LN_S%g
+s%@LIBTOOL@%$LIBTOOL%g
CEOF
EOF
@@ -1950,7 +2986,26 @@
cat >> $CONFIG_STATUS <<EOF
-CONFIG_FILES=\${CONFIG_FILES-"Makefile src/Makefile gd1.2/Makefile doc/GNUmakefile perl-shared/examples/shared-demo.pl perl-piped/examples/piped-demo.pl"}
+CONFIG_FILES=\${CONFIG_FILES-"cgilib-0.4/Makefile \
+ config/Makefile \
+ examples/shared-demo.pl \
+ examples/piped-demo.pl \
+ examples/stripes.pl \
+ examples/bigtops.pl \
+ examples/minmax.pl \
+ examples/cgi-demo.cgi \
+ examples/Makefile \
+ contrib/Makefile \
+ contrib/trytime/Makefile \
+ contrib/log2rrd/Makefile \
+ contrib/log2rrd/log2rrd.pl \
+ contrib/rrd-file-icon/Makefile \
+ doc/Makefile \
+ gd1.3/Makefile \
+ libpng-1.0.3/Makefile \
+ zlib-1.1.3/Makefile \
+ src/Makefile \
+ Makefile"}
EOF
cat >> $CONFIG_STATUS <<\EOF
for ac_file in .. $CONFIG_FILES; do if test "x$ac_file" != x..; then
@@ -2031,7 +3086,7 @@
if test "${CONFIG_HEADERS+set}" != set; then
EOF
cat >> $CONFIG_STATUS <<EOF
- CONFIG_HEADERS="config.h"
+ CONFIG_HEADERS="config/config.h"
EOF
cat >> $CONFIG_STATUS <<\EOF
fi
@@ -2119,9 +3174,12 @@
EOF
cat >> $CONFIG_STATUS <<EOF
+
EOF
cat >> $CONFIG_STATUS <<\EOF
-chmod +x perl-*/examples/*.pl
+test -z "$CONFIG_HEADERS" || echo timestamp > config/stamp-h
+\
+ chmod +x examples/*.cgi examples/*.pl contrib/*/*.pl
exit 0
EOF
chmod +x $CONFIG_STATUS
@@ -2130,9 +3188,32 @@
echo $ac_n "checking in""... $ac_c" 1>&6
-echo "configure:2134: checking in" >&5
+echo "configure:3192: checking in" >&5
echo "$ac_t""and out again" 1>&6
-echo $ac_n "ordering CD from http://cdnow.com/gift/oetiker@ee.ethz.ch""... $ac_c" 1>&6
-sleep 3
-echo "$ac_t""just kidding ;-)" 1>&6
+echo $ac_n "ordering CD from http://cdnow.com/gift/oetiker@ee.ethz.ch $ac_c" 1>&6
+sleep 1
+echo $ac_n ".$ac_c" 1>&6
+sleep 1
+echo $ac_n ".$ac_c" 1>&6
+sleep 1
+echo $ac_n ".$ac_c" 1>&6
+sleep 1
+echo $ac_n ".$ac_c" 1>&6
+sleep 1
+echo "$ac_t"" just kidding ;-)" 1>&6
+echo
+echo "----------------------------------------------------------------"
+echo "Config is DONE!"
+echo
+echo "Type 'make' to compile the software and use 'make install' to "
+echo "install everything to $prefix. If you want to install the perl"
+echo "modules in site-perl, try 'make site-perl-install'."
+echo
+echo " ... that wishlist mentioned above does realy exist. So if"
+echo "you feel like showing your appreciation for rrdtool this is the"
+echo "place to go. :-)"
+echo
+echo " -- Tobi Oetiker <oetiker at ee.ethz.ch>"
+echo "----------------------------------------------------------------"
+
Modified: trunk/orca/packages/rrdtool-1.0.7.2/Makefile.in
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/Makefile.in (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/Makefile.in Sat Jul 13 19:22:31 2002
@@ -1,53 +1,434 @@
-# things that the GNU standards document suggests all makefiles
-# should have.
-SHELL = /bin/sh
-@SET_MAKE@
+# Makefile.in generated automatically by automake 1.4 from Makefile.am
+
+# Copyright (C) 1994, 1995-8, 1999 Free Software Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+# build the following subdirectories
+
+
+SHELL = @SHELL@
+
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+VPATH = @srcdir@
+prefix = @prefix@
+exec_prefix = @exec_prefix@
+
+bindir = @bindir@
+sbindir = @sbindir@
+libexecdir = @libexecdir@
+datadir = @datadir@
+sysconfdir = @sysconfdir@
+sharedstatedir = @sharedstatedir@
+localstatedir = @localstatedir@
+libdir = @libdir@
+infodir = @infodir@
+mandir = @mandir@
+includedir = @includedir@
+oldincludedir = /usr/include
+
+DESTDIR =
+
+pkgdatadir = $(datadir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+
+top_builddir = .
+
+ACLOCAL = @ACLOCAL@
+AUTOMAKE = @AUTOMAKE@
+
INSTALL = @INSTALL@
-PREFIX = @prefix@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@ $(AM_INSTALL_PROGRAM_FLAGS)
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+transform = @program_transform_name@
+
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_alias = @build_alias@
+build_triplet = @build@
+host_alias = @host_alias@
+host_triplet = @host@
+target_alias = @target_alias@
+target_triplet = @target@
+CC = @CC@
CFLAGS = @CFLAGS@
+CGI_LIB_DIR = @CGI_LIB_DIR@
+COMP_PERL = @COMP_PERL@
+CPP = @CPP@
+GD_LIB_DIR = @GD_LIB_DIR@
+LIBTOOL = @LIBTOOL@
+PERL = @PERL@
+PNG_LIB_DIR = @PNG_LIB_DIR@
+RANLIB = @RANLIB@
+ZLIB_LIB_DIR = @ZLIB_LIB_DIR@
+
+SUBDIRS = cgilib-0.4 config gd1.3 zlib-1.1.3 libpng-1.0.3 src doc examples contrib
+
+# the following files are not mentioned in any other Makefile
+EXTRA_DIST = COPYRIGHT CHANGES NT-BUILD-TIPS.txt TODO CONTRIBUTORS perl-piped/MANIFEST perl-piped/README perl-piped/rrdpl.ds? perl-piped/RRDp.pm perl-piped/Makefile.PL perl-piped/t/base.t perl-shared/MANIFEST perl-shared/README perl-shared/RRDs.xs perl-shared/ntmake.pl perl-shared/Makefile.PL perl-shared/t/base.t perl-shared/rrdpl.ds? perl-shared/RRDs.pm
+
+
+#
+CLEANFILES = config.cache
+
+# use relaxed rules when building dists
+AUTOMAKE_OPTIONS = foreign
+
+# where we keep local rules for automake
+ACLOCAL_M4 = $(top_srcdir)/config/aclocal.m4
+AUTOHEADER = @AUTOHEADER@ --localdir=$(top_srcdir)/config
+AUTOCONF = @AUTOCONF@ --localdir=$(top_srcdir)/config
+mkinstalldirs = $(SHELL) $(top_srcdir)/config/mkinstalldirs
+CONFIG_HEADER = ./config/config.h
+CONFIG_CLEAN_FILES =
+DIST_COMMON = README COPYING Makefile.am Makefile.in TODO \
+config/config.h.in config/stamp-h.in configure configure.in
+
+
+PACKAGE = @PACKAGE@
+VERSION = @VERSION@
+
+DISTFILES = $(DIST_COMMON) $(SOURCES) $(HEADERS) $(TEXINFOS) $(EXTRA_DIST)
+
+TAR = tar
+GZIP_ENV = --best
+all: all-redirect
.SUFFIXES:
-.SUFFIXES: .c .o .pl .pm .pod .html .man
+$(srcdir)/Makefile.in: Makefile.am $(top_srcdir)/configure.in $(ACLOCAL_M4)
+ cd $(top_srcdir) && $(AUTOMAKE) --foreign --include-deps Makefile
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$@ CONFIG_HEADERS= $(SHELL) ./config.status
+
+
+config.status: $(srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ $(SHELL) ./config.status --recheck
+$(srcdir)/configure: $(srcdir)/configure.in $(ACLOCAL_M4) $(CONFIGURE_DEPENDENCIES)
+ cd $(srcdir) && $(AUTOCONF)
+
+config/config.h: config/stamp-h
+ @if test ! -f $@; then \
+ rm -f config/stamp-h; \
+ $(MAKE) config/stamp-h; \
+ else :; fi
+config/stamp-h: $(srcdir)/config/config.h.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES= CONFIG_HEADERS=config/config.h \
+ $(SHELL) ./config.status
+ @echo timestamp > config/stamp-h 2> /dev/null
+$(srcdir)/config/config.h.in: $(srcdir)/config/stamp-h.in
+ @if test ! -f $@; then \
+ rm -f $(srcdir)/config/stamp-h.in; \
+ $(MAKE) $(srcdir)/config/stamp-h.in; \
+ else :; fi
+$(srcdir)/config/stamp-h.in: $(top_srcdir)/configure.in $(ACLOCAL_M4)
+ cd $(top_srcdir) && $(AUTOHEADER)
+ @echo timestamp > $(srcdir)/config/stamp-h.in 2> /dev/null
+
+mostlyclean-hdr:
+
+clean-hdr:
+
+distclean-hdr:
+ -rm -f config/config.h
+
+maintainer-clean-hdr:
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run `make' without going through this Makefile.
+# To change the values of `make' variables: instead of editing Makefiles,
+# (1) if the variable is set in `config.status', edit `config.status'
+# (which will cause the Makefiles to be regenerated when you run `make');
+# (2) otherwise, pass the desired values on the `make' command line.
+
 @SET_MAKE@
-all:
- (cd gd1.2 && $(MAKE) CFLAGS="$(CFLAGS)" libgd.a)
- (cd src && $(MAKE) CFLAGS="$(CFLAGS)")
- $(MAKE) perl-piped perl-shared
- (cd doc && $(MAKE))
+all-recursive install-data-recursive install-exec-recursive \
+installdirs-recursive install-recursive uninstall-recursive \
+check-recursive installcheck-recursive info-recursive dvi-recursive:
+ @set fnord $(MAKEFLAGS); amf=$$2; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || case "$$amf" in *=*) exit 1;; *k*) fail=yes;; *) exit 1;; esac; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
-perl-piped: perl-piped/Makefile
- (cd perl-piped && $(MAKE) OPTIMIZE="$(CFLAGS)")
+mostlyclean-recursive clean-recursive distclean-recursive \
+maintainer-clean-recursive:
+ @set fnord $(MAKEFLAGS); amf=$$2; \
+ dot_seen=no; \
+ rev=''; list='$(SUBDIRS)'; for subdir in $$list; do \
+ rev="$$subdir $$rev"; \
+ test "$$subdir" = "." && dot_seen=yes; \
+ done; \
+ test "$$dot_seen" = "no" && rev=". $$rev"; \
+ target=`echo $@ | sed s/-recursive//`; \
+ for subdir in $$rev; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || case "$$amf" in *=*) exit 1;; *k*) fail=yes;; *) exit 1;; esac; \
+ done && test -z "$$fail"
+tags-recursive:
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \
+ done
-perl-shared: perl-shared/Makefile
- (cd perl-shared && $(MAKE) OPTIMIZE="$(CFLAGS)")
+tags: TAGS
+ID: $(HEADERS) $(SOURCES) $(LISP)
+ list='$(SOURCES) $(HEADERS)'; \
+ unique=`for i in $$list; do echo $$i; done | \
+ awk ' { files[$$0] = 1; } \
+ END { for (i in files) print i; }'`; \
+ here=`pwd` && cd $(srcdir) \
+ && mkid -f$$here/ID $$unique $(LISP)
+
+TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) $(LISP)
+ tags=; \
+ here=`pwd`; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test -f $$subdir/TAGS && tags="$$tags -i $$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ list='$(SOURCES) $(HEADERS)'; \
+ unique=`for i in $$list; do echo $$i; done | \
+ awk ' { files[$$0] = 1; } \
+ END { for (i in files) print i; }'`; \
+ test -z "$(ETAGS_ARGS)$$unique$(LISP)$$tags" \
+ || (cd $(srcdir) && etags $(ETAGS_ARGS) $$tags $$unique $(LISP) -o $$here/TAGS)
+
+mostlyclean-tags:
+
+clean-tags:
+
+distclean-tags:
+ -rm -f TAGS ID
+
+maintainer-clean-tags:
+
+distdir = $(PACKAGE)-$(VERSION)
+top_distdir = $(distdir)
+
+# This target untars the dist file and tries a VPATH configuration. Then
+# it guarantees that the distribution is self-contained by making another
+# tarfile.
+distcheck: dist
+ -rm -rf $(distdir)
+ GZIP=$(GZIP_ENV) $(TAR) zxf $(distdir).tar.gz
+ mkdir $(distdir)/=build
+ mkdir $(distdir)/=inst
+ dc_install_base=`cd $(distdir)/=inst && pwd`; \
+ cd $(distdir)/=build \
+ && ../configure --srcdir=.. --prefix=$$dc_install_base \
+ && $(MAKE) $(AM_MAKEFLAGS) \
+ && $(MAKE) $(AM_MAKEFLAGS) dvi \
+ && $(MAKE) $(AM_MAKEFLAGS) check \
+ && $(MAKE) $(AM_MAKEFLAGS) install \
+ && $(MAKE) $(AM_MAKEFLAGS) installcheck \
+ && $(MAKE) $(AM_MAKEFLAGS) dist
+ -rm -rf $(distdir)
+ @banner="$(distdir).tar.gz is ready for distribution"; \
+ dashes=`echo "$$banner" | sed s/./=/g`; \
+ echo "$$dashes"; \
+ echo "$$banner"; \
+ echo "$$dashes"
+dist: distdir
+ -chmod -R a+r $(distdir)
+ GZIP=$(GZIP_ENV) $(TAR) chozf $(distdir).tar.gz $(distdir)
+ -rm -rf $(distdir)
+dist-all: distdir
+ -chmod -R a+r $(distdir)
+ GZIP=$(GZIP_ENV) $(TAR) chozf $(distdir).tar.gz $(distdir)
+ -rm -rf $(distdir)
+distdir: $(DISTFILES)
+ -rm -rf $(distdir)
+ mkdir $(distdir)
+ -chmod 777 $(distdir)
+ $(mkinstalldirs) $(distdir)/perl-piped $(distdir)/perl-piped/t \
+ $(distdir)/perl-shared $(distdir)/perl-shared/t
+ @for file in $(DISTFILES); do \
+ d=$(srcdir); \
+ if test -d $$d/$$file; then \
+ cp -pr $$d/$$file $(distdir)/$$file; \
+ else \
+ test -f $(distdir)/$$file \
+ || ln $$d/$$file $(distdir)/$$file 2> /dev/null \
+ || cp -p $$d/$$file $(distdir)/$$file || :; \
+ fi; \
+ done
+ for subdir in $(SUBDIRS); do \
+ if test "$$subdir" = .; then :; else \
+ test -d $(distdir)/$$subdir \
+ || mkdir $(distdir)/$$subdir \
+ || exit 1; \
+ chmod 777 $(distdir)/$$subdir; \
+ (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir=../$(distdir) distdir=../$(distdir)/$$subdir distdir) \
+ || exit 1; \
+ fi; \
+ done
+info-am:
+info: info-recursive
+dvi-am:
+dvi: dvi-recursive
+check-am: all-am
+check: check-recursive
+installcheck-am:
+installcheck: installcheck-recursive
+install-exec-am:
+install-exec: install-exec-recursive
+
+install-data-am: install-data-local
+install-data: install-data-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+install: install-recursive
+uninstall-am:
+uninstall: uninstall-recursive
+all-am: Makefile all-local
+all-redirect: all-recursive
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) AM_INSTALL_PROGRAM_FLAGS=-s install
+installdirs: installdirs-recursive
+installdirs-am:
+
+
+mostlyclean-generic:
+
+clean-generic:
+ -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+ -rm -f Makefile $(CONFIG_CLEAN_FILES)
+ -rm -f config.cache config.log stamp-h stamp-h[0-9]*
+
+maintainer-clean-generic:
+mostlyclean-am: mostlyclean-hdr mostlyclean-tags mostlyclean-generic
+
+mostlyclean: mostlyclean-recursive
+
+clean-am: clean-hdr clean-tags clean-generic mostlyclean-am clean-local
+
+clean: clean-recursive
+
+distclean-am: distclean-hdr distclean-tags distclean-generic clean-am
+ -rm -f libtool
+
+distclean: distclean-recursive
+ -rm -f config.status
+
+maintainer-clean-am: maintainer-clean-hdr maintainer-clean-tags \
+ maintainer-clean-generic distclean-am
+ @echo "This command is intended for maintainers to use;"
+ @echo "it deletes files that may require special tools to rebuild."
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f config.status
+
+.PHONY: mostlyclean-hdr distclean-hdr clean-hdr maintainer-clean-hdr \
+install-data-recursive uninstall-data-recursive install-exec-recursive \
+uninstall-exec-recursive installdirs-recursive uninstalldirs-recursive \
+all-recursive check-recursive installcheck-recursive info-recursive \
+dvi-recursive mostlyclean-recursive distclean-recursive clean-recursive \
+maintainer-clean-recursive tags tags-recursive mostlyclean-tags \
+distclean-tags clean-tags maintainer-clean-tags distdir info-am info \
+dvi-am dvi check check-am installcheck-am installcheck install-exec-am \
+install-exec install-data-local install-data-am install-data install-am \
+install uninstall-am uninstall all-local all-redirect all-am all \
+installdirs-am installdirs mostlyclean-generic distclean-generic \
+clean-generic maintainer-clean-generic clean mostlyclean distclean \
+maintainer-clean
+
+
+# let's schedule the perl stuff for installation
+install-data-local:
+ $(INSTALL) -d -m 755 $(prefix)/lib/perl/auto/RRDs
+ $(INSTALL) -m 644 perl-piped/RRDp.pm $(prefix)/lib/perl
+ $(INSTALL) -m 644 perl-shared/RRDs.pm $(prefix)/lib/perl
+ $(INSTALL) -m 644 perl-shared/blib/arch/auto/RRDs/RRDs.bs $(prefix)/lib/perl/auto/RRDs
+ $(INSTALL) -m 755 perl-shared/blib/arch/auto/RRDs/RRDs.so $(prefix)/lib/perl/auto/RRDs
+
+# rules for building the perl module
+perl_piped: perl-piped/Makefile
+ cd perl-piped && $(MAKE) OPTIMIZE="$(CFLAGS)"
perl-piped/Makefile: perl-piped/Makefile.PL
- (cd perl-piped && @PERL@ Makefile.PL)
+ cd perl-piped && $(PERL) Makefile.PL
+
+perl_shared: perl-shared/Makefile
+ cd perl-shared && $(MAKE) OPTIMIZE="$(CFLAGS)"
perl-shared/Makefile: perl-shared/Makefile.PL
- (cd perl-shared && @PERL@ Makefile.PL)
+ cd perl-shared && $(PERL) Makefile.PL
+
+# add the following to the all target
+all-local: @COMP_PERL@
+
+# add to install
+
+#to-autoconf:
+# aclocal -I config -I config/libtool --output=config/aclocal.m4
+# automake --foreign
+# autoconf --localdir=config
+# autoheader --localdir=config
+# ./configure
+
+to-docs: to-versync
+ (cd doc && $(MAKE) clean && $(MAKE))
+#to-autoconf
+to-versync:
+ perl -i -p -e '"$(VERSION)" =~ /(\d+)\.(\d+)\.(\d+)/; $$v=sprintf("%1d.%02d0%02d1" ,$${1},$${2},$${3}); s|VERSION\s*=\s*[\d.]+|VERSION = $$v|' perl-*/RRD?.pm
+ perl -i -p -e 's|RRDtool\s+\d+\.\d+\.\d+|RRDtool $(VER)|' src/*.[ch]
+
+to-dist: to-docs dist
+ mv $(PACKAGE)-$(VERSION).tar.gz archive
+
+to-scp: to-dist
+ scp CHANGES oetiker at tardis.ee.ethz.ch:/home/oetiker/public_html/webtools/rrdtool/pub/
+ scp archive/$(PACKAGE)-$(VERSION).tar.gz oetiker at tardis.ee.ethz.ch:/home/oetiker/public_html/webtools/rrdtool/pub/$(ARCHIVE)
+ scp CHANGES tobi at ipn.caida.org:/ipn/web/Tools/RRDtool/pub/
+ scp archive/$(PACKAGE)-$(VERSION).tar.gz tobi at ipn.caida.org:/ipn/web/Tools/RRDtool/pub/
+
+site-perl-inst:
+ cd perl-piped && $(MAKE) install
+ cd perl-shared && $(MAKE) install
+
+clean-local:
+ cd perl-piped && $(MAKE) clean
+ cd perl-shared && $(MAKE) clean
-clean:
- (cd gd1.2 && $(MAKE) clean)
- (cd src && $(MAKE) clean)
-
-distclean realclean:
- (cd gd1.2 && $(MAKE) realclean)
- (cd src && $(MAKE) realclean)
- @if test -f perl-shared/Makefile; then \
- echo '(cd perl-shared; $(MAKE) clean)' ; \
- (cd perl-shared; $(MAKE) clean); \
- fi
- @if test -f perl-piped/Makefile; then \
- echo '(cd perl-piped; $(MAKE) clean)' ; \
- (cd perl; $(MAKE) clean); \
- fi
- -rm -f config.cache config.h config.log config.status Makefile
-
-install: all
- (cd perl-piped && $(MAKE) install)
- (cd perl-shared && $(MAKE) install)
- $(INSTALL) -m 755 -o root -g root src/rrdtool $(PREFIX)/bin
- $(INSTALL) -m 644 -o root -g root doc/*.3 $(PREFIX)/man/man3
- $(INSTALL) -m 644 -o root -g root doc/*.1 $(PREFIX)/man/man1
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
Modified: trunk/orca/packages/rrdtool-1.0.7.2/configure.in
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/configure.in (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/configure.in Sat Jul 13 19:22:31 2002
@@ -1,49 +1,65 @@
-dnl This file is the master file for the configure process.
-dnl It is only edited by developers. If you are trying to compile
-dnl rrd_tool, use "sh configure" instead of this file.
+dnl RRDtool AutoConf script ...
+dnl ---------------------------
dnl
-dnl From it, config.h.in and configure are both made.
-dnl To make them, you (a developer) will need to install the latest
-dnl versions of GNU m4 and GNU autoconf. Then you run "autoheader"
-dnl in the distribution directory to make config.h.in, and
-dnl "autoconf" to make the configure script. These two files
-dnl must be shipped with the distribution; end users should not need
-dnl m4 and autoconf to be able to compile. All they need is
-dnl to run "sh configure".
+dnl Created by Jeff Allen, Tobi Oetiker, Blair Zajac
dnl
-dnl The format of this file is basically one macro to a line.
-dnl Text that is not a macro and not a comment is passed straight
-dnl through to the configure script.
dnl
-dnl Comments are "dnl" followed by comments. It means delete
-dnl to newline.
-dnl
-dnl When you want to pass a literal bit of text to a macro as
-dnl an argument, you use square brackets. Thus, you cannot use
-dnl the bracket shorthand for /bin/test... you must use the
-dnl actual program name.
-dnl
-dnl Our tests do not cache. This is because I'm too lazy to
-dnl figure out how to use the caching macros. There's not many,
-dnl so it's not a big problem.
-dnl
-dnl -jra
+dnl make sure we are being executed in the right place
AC_INIT(src/rrd_tool.c)
-AC_CONFIG_HEADER(config.h)
-dnl Check for programs.
+dnl all our local stuff like install scripts and include files
+dnl is in there
+AC_CONFIG_AUX_DIR(config)
+
+dnl determine the type of system we are running on
+AC_CANONICAL_SYSTEM
+
+dnl tell automake that this script is for rrdtool
+AM_INIT_AUTOMAKE(rrdtool, 1.0.7)
+
+dnl where we install our stuff ...
+AC_PREFIX_DEFAULT( /usr/local/rrdtool-1.0.7 )
+
+dnl tell automake which file to use as config header
+AM_CONFIG_HEADER(config/config.h)
+
+dnl Minimum Autoconf version required.
+AC_PREREQ(2.13)
+
+dnl Define library subdirectory names here.
+CGI_LIB_DIR=cgilib-0.4
+GD_LIB_DIR=gd1.3
+PNG_LIB_DIR=libpng-1.0.3
+ZLIB_LIB_DIR=zlib-1.1.3
+
+dnl substitute them in all the files listed in AC_OUTPUT
+AC_SUBST(CGI_LIB_DIR)
+AC_SUBST(GD_LIB_DIR)
+AC_SUBST(PNG_LIB_DIR)
+AC_SUBST(ZLIB_LIB_DIR)
+
+
+dnl Check for Perl.
+AC_PATH_PROG(PERL, perl, no)
+if test "x$PERL" = "nox"; then
+ COMP_PERL=
+else
+ COMP_PERL="perl_piped perl_shared"
+fi
+AC_SUBST(COMP_PERL)
+
+
+dnl Check for the compiler and static/shared library creation.
AC_PROG_CC
AC_PROG_CPP
-AC_PROG_INSTALL
-AC_PROG_LN_S
-AC_PROG_MAKE_SET
-AC_PROG_RANLIB
-AC_PATH_PROG(PERL, perl, no)
+dnl RRD_ACLOCAL_FIND_LIBTOOL
+dnl don't build a shared library ... perl will do its own magic.
+dnl this can be changed when running configure
+AC_DISABLE_SHARED
-dnl Checks for libraries.
-AC_CHECK_LIB(m, acos)
+AM_PROG_LIBTOOL
dnl Checks for header files.
AC_HEADER_STDC
@@ -54,85 +70,205 @@
AC_HEADER_TIME
AC_STRUCT_TM
+dnl Checks for libraries.
+AC_CHECK_LIB(m, acos)
+
dnl If CC is not set we assume gcc
if test ${CC-gcc} = gcc; then
- rd_cv_prog_hpcc=no
-oCFLAGS_EXTRA=$CFLAGS_EXTRA
-CFLAGS_EXTRA="$CFLAGS_EXTRA -Wall -pedantic -fPIC"
-AC_CACHE_CHECK(if we can use GCC-specific compiler options, rd_cv_gcc_opt,
-[
-AC_TRY_COMPILE( , return 0 ,
- rd_cv_gcc_opt=yes,
- rd_cv_gcc_opt=no )
-])
-if test $rd_cv_gcc_opt = no; then
- CFLAGS_EXTRA=$oCFLAGS_EXTRA
-fi
-
-else
-AC_CACHE_CHECK(if we should use HP compiler options, rd_cv_prog_hpcc,
-[
-cat > conftest.c <<EOF
-#ifdef _HPUX_SOURCE
- yes;
-#endif
-EOF
-if AC_TRY_COMMAND(${CC-cc} -Ae -z -E conftest.c) | egrep yes >/dev/null 2>&1
-then
- rd_cv_prog_hpcc=yes
-else
- rd_cv_prog_hpcc=no
-fi])
-fi
-if test $rd_cv_prog_hpcc = yes; then
- CFLAGS_EXTRA="$CFLAGS_EXTRA -Ae -z"
+ oCFLAGS=$CFLAGS
+ CFLAGS="$CFLAGS -Wall -pedantic"
+ AC_CACHE_CHECK(if we can use GCC-specific compiler options, rd_cv_gcc_opt,
+ [
+ AC_TRY_COMPILE( , return 0 ,
+ rd_cv_gcc_opt=yes,
+ rd_cv_gcc_opt=no )
+ ])
+ if test $rd_cv_gcc_opt = no; then
+ CFLAGS=$oCFLAGS
+ fi
fi
dnl Checks for library functions.
AC_FUNC_STRFTIME
AC_FUNC_VPRINTF
-AC_CHECK_FUNCS(mktime getrusage gettimeofday)
-
-oCFLAGS_EXTRA=$CFLAGS_EXTRA
-AC_CACHE_CHECK([if the compiler does proper IEEE math], rd_cv_ieee_works,
-[AC_TRY_RUN([int main(void){
- double a=0.0/0.0,b=0.0,c;
- c=a/b;if (c==a) c=a;return isnan(a/c)?0:1;
- }],
- [rd_cv_ieee_works=yes],[
-AC_MSG_RESULT(no it does not)
-CFLAGS_EXTRA="$CFLAGS_EXTRA -ieee"
-AC_CACHE_CHECK([if the compiler does proper IEEE math with -ieee], rd_cv_ieee_switch,
-[AC_TRY_RUN([int main(void){
- double a=0.0/0.0,b=0.0,c;
- c=a/b;if (c==a) c=a;return isnan(a/c)?0:1;
+dnl for each function found we get a definition in config.h
+dnl of the form HAVE_FUNCTION
+AC_CHECK_FUNCS(snprintf fpclass isnan finite isinf memmove strchr mktime getrusage gettimeofday)
+
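The HAVE_* definitions produced by the AC_CHECK_FUNCS line above end up in the generated config header. A minimal sketch of how C code typically consumes them; the helper name format_legend() is hypothetical, though rrd_graph.c applies the same idea when sizing FMT_LEG_LEN:

    /* Sketch only: guard on the HAVE_* symbols from AC_CHECK_FUNCS.
     * format_legend() is a hypothetical helper, not from this commit. */
    #include <stdio.h>
    #include "config.h"               /* defines HAVE_SNPRINTF when found */

    static void format_legend(char *buf, size_t len, const char *fmt, double v)
    {
    #ifdef HAVE_SNPRINTF
        snprintf(buf, len, fmt, v);   /* bounded formatting when available */
    #else
        sprintf(buf, fmt, v);         /* fall back, relying on a large buffer */
    #endif
    }
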
+dnl what does realloc do if it gets called with a NULL pointer
+
+AC_CACHE_CHECK([if realloc can deal with NULL], rd_cv_null_realloc,
+[AC_TRY_RUN([#include <stdlib.h>
+ int main(void){
+ char *x = NULL;
+ x = realloc (x,10);
+ if (x==NULL) return 1;
+ return 0;
}],
- [rd_cv_ieee_switch=yes],[rd_cv_ieee_switch=no;
-AC_MSG_RESULT(nope)
+ [rd_cv_null_realloc=yes],[rd_cv_null_realloc=nope],:)])
+
+if test x"$rd_cv_null_realloc" = xnope; then
+AC_DEFINE(NO_NULL_REALLOC)
+fi
+
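Assuming the NO_NULL_REALLOC symbol defined by the check above, a caller could route allocations through a small wrapper so that realloc() is never handed a NULL pointer on the affected platforms; the wrapper name below is illustrative, not necessarily what the sources use:

    /* Sketch, assuming NO_NULL_REALLOC from the configure check above. */
    #include <stdlib.h>
    #include "config.h"

    static void *rrd_realloc(void *ptr, size_t size)
    {
    #ifdef NO_NULL_REALLOC
        if (ptr == NULL)
            return malloc(size);      /* realloc(NULL, n) is unreliable here */
    #endif
        return realloc(ptr, size);
    }
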
+dnl determine how to get IEEE math to work on this box.
+
+AC_DEFUN(AC_IEEE,
+AC_CACHE_CHECK([if IEEE math works $1], [rd_cv_ieee_$2],
+[AC_TRY_RUN([$3
+
+#if (! defined(HAVE_FINITE) && defined(HAVE_ISNAN) && defined(HAVE_ISINF))
+# define HAVE_FINITE 1
+# define finite(a) (! isnan(a) && ! isinf(a))
+#endif
+
+#if (! defined(HAVE_ISINF) && defined(HAVE_FPCLASS))
+# define HAVE_ISINF 1
+# include <ieeefp.h>
+# define isinf(a) (fpclass(a) == FP_NINF || fpclass(a) == FP_PINF)
+#endif
+
+#include <stdio.h>
+int main(void){
+ double nan,inf,c,d;
+ $4;
+ /* some math to see if we get a segfault; */
+ nan=0.0/0.0;
+ inf=1.0/0.0;
+ c = 1.0;
+ c = c / 0.0; /* try getting fpe */
+ c = inf + nan;
+ c = inf / nan;
+ if (! isnan(nan)) {printf ("isnan(NaN) ... "); return 1;}
+ if (nan == nan) {printf ("nan != nan ..."); return 1;}
+ if (! isinf(inf)) {printf ("isinf(oo) ... "); return 1;}
+ if (! isinf(-inf)) {printf ("isinf(-oo) ... "); return 1;}
+ if (! inf > 0) {printf ("inf > 0 ... "); return 1;}
+ if (! -inf < 0) {printf ("inf < 0 ... "); return 1;}
+ return 0;
+}],
+ [rd_cv_ieee_$2=yes],[rd_cv_ieee_$2=no],:)]))
+
+oCFLAGS=$CFLAGS
+unset CFLAGS
+
+AC_IEEE([out of the box],works)
+
+if test "$rd_cv_ieee_works" != yes ; then
+
+CFLAGS=-ieee
+AC_IEEE([with the -ieee switch],switch)
+
+ if test "$rd_cv_ieee_switch" != yes ; then
+
+CFLAGS=-mieee
+AC_IEEE([with the -mieee switch],mswitch)
+
+ if test "$rd_cv_ieee_mswitch" != yes ; then
+
+CFLAGS="-q float=rndsngl"
+AC_IEEE([with the -q float=rndsngl switch],qswitch)
+
+ if test "$rd_cv_ieee_qswitch" != yes ; then
+
+unset CFLAGS
+
+AC_IEEE([with fpsetmask(0)],mask,[#include <floatingpoint.h>],[fpsetmask(0)])
+
+ if test "$rd_cv_ieee_mask" != yes ; then
+
+AC_IEEE([with signal(SIGFPE,SIG_IGN)],sigfpe,[#include <signal.h>],[signal(SIGFPE,SIG_IGN)])
+
+ if test "$rd_cv_ieee_sigfpe" != yes ; then
+
+
echo "--------------------------------------------------------------"
echo "Your Compiler does not do propper IEEE math ... "
echo "Please find out how to make IEEE math work with your Compiler"
-echo "And let me know (oetiker at ee.ethz.ch)"
+echo "and let me know (oetiker at ee.ethz.ch)"
+echo "Check config.log to see what went wrong ..."
echo ""
exit 1
-],:)])
-],:)])
+ fi
+ fi
+ fi
+ fi
+ fi
+fi
+
+CFLAGS=$oCFLAGS
-CFLAGS_EXTRA=$oCFLAGS_EXTRA
+if test x$rd_cv_ieee_sigfpe = xyes; then
+ AC_DEFINE(MUST_DISABLE_SIGFPE)
+fi
+
+if test x$rd_cv_ieee_mask = xyes; then
+ AC_DEFINE(MUST_DISABLE_FPMASK)
+fi
if test x$rd_cv_ieee_switch = xyes; then
- CFLAGS_EXTRA="$CFLAGS_EXTRA -ieee"
+ CFLAGS="$CFLAGS -ieee"
+fi
+
+if test x$rd_cv_ieee_mswitch = xyes; then
+ CFLAGS="$CFLAGS -mieee"
+fi
+
+if test x$rd_cv_ieee_qswitch = xyes; then
+ CFLAGS="$CFLAGS -q float=rndsngl"
fi
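A sketch of how the MUST_DISABLE_SIGFPE and MUST_DISABLE_FPMASK results recorded above might be honoured at program start-up; where rrdtool actually performs this is not shown in this diff, so the function below is only illustrative:

    /* Sketch: act on the configure results so NaN/Inf propagate instead of
     * trapping.  Only the branch whose symbol configure defined compiles in. */
    #include "config.h"
    #ifdef MUST_DISABLE_SIGFPE
    #include <signal.h>
    #endif
    #ifdef MUST_DISABLE_FPMASK
    #include <floatingpoint.h>
    #endif

    static void setup_ieee_math(void)
    {
    #ifdef MUST_DISABLE_SIGFPE
        signal(SIGFPE, SIG_IGN);      /* ignore FP exceptions */
    #endif
    #ifdef MUST_DISABLE_FPMASK
        fpsetmask(0);                 /* clear the FP trap mask */
    #endif
    }
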
-AC_SUBST(CFLAGS_EXTRA)
AC_SUBST(CFLAGS)
-AC_OUTPUT(Makefile src/Makefile gd1.2/Makefile doc/GNUmakefile perl-shared/examples/shared-demo.pl perl-piped/examples/piped-demo.pl, [chmod +x perl-*/examples/*.pl])
+AC_OUTPUT(cgilib-0.4/Makefile \
+ config/Makefile \
+ examples/shared-demo.pl \
+ examples/piped-demo.pl \
+ examples/stripes.pl \
+ examples/bigtops.pl \
+ examples/minmax.pl \
+ examples/cgi-demo.cgi \
+ examples/Makefile \
+ contrib/Makefile \
+ contrib/trytime/Makefile \
+ contrib/log2rrd/Makefile \
+ contrib/log2rrd/log2rrd.pl \
+ contrib/rrd-file-icon/Makefile \
+ doc/Makefile \
+ gd1.3/Makefile \
+ libpng-1.0.3/Makefile \
+ zlib-1.1.3/Makefile \
+ src/Makefile \
+ Makefile, \
+ [chmod +x examples/*.cgi examples/*.pl contrib/*/*.pl])
AC_MSG_CHECKING(in)
AC_MSG_RESULT(and out again)
-echo $ac_n "ordering CD from http://cdnow.com/gift/oetiker@ee.ethz.ch""... $ac_c" 1>&6
-sleep 3
-AC_MSG_RESULT([just kidding ;-)])
+echo $ac_n "ordering CD from http://cdnow.com/gift/oetiker@ee.ethz.ch $ac_c" 1>&6
+sleep 1
+echo $ac_n ".$ac_c" 1>&6
+sleep 1
+echo $ac_n ".$ac_c" 1>&6
+sleep 1
+echo $ac_n ".$ac_c" 1>&6
+sleep 1
+echo $ac_n ".$ac_c" 1>&6
+sleep 1
+AC_MSG_RESULT([ just kidding ;-)])
+echo
+echo "----------------------------------------------------------------"
+echo "Config is DONE!"
+echo
+echo "Type 'make' to compile the software and use 'make install' to "
+echo "install everything to $prefix. If you want to install the perl"
+echo "modules in site-perl, try 'make site-perl-install'."
+echo
+echo " ... that wishlist mentioned above does realy exist. So if"
+echo "you feel like showing your appreciation for rrdtool this is the"
+echo "place to go. :-)"
+echo
+echo " -- Tobi Oetiker <oetiker at ee.ethz.ch>"
+echo "----------------------------------------------------------------"
+
Modified: trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_format.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_format.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_format.c Sat Jul 13 19:22:31 2002
@@ -1,5 +1,5 @@
/*****************************************************************************
- * RRDTOOL 0.99.31 Copyright Tobias Oetiker, 1999
+ * RRDtool Copyright Tobias Oetiker, 1999
*****************************************************************************
* rrd_format.c RRD Database Format helper functions
*****************************************************************************
Added: trunk/orca/packages/rrdtool-1.0.7.2/src/gdpng.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/gdpng.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/gdpng.c Sat Jul 13 19:22:32 2002
@@ -0,0 +1,71 @@
+/*****************************************************************************
+ * RRDtool Copyright Tobias Oetiker, 1997, 1998, 1999
+ *****************************************************************************
+ * gdpng.c add PNG output routine to gd library
+ *****************************************************************************/
+
+#include <png.h>
+#include <gd.h>
+#include <stdlib.h>
+
+typedef struct _jmpbuf_wrapper {
+ jmp_buf jmpbuf;
+} jmpbuf_wrapper;
+
+static jmpbuf_wrapper gdPngJmpbufStruct;
+
+void gdImagePng(gdImagePtr im, FILE *out)
+{
+ int i;
+ png_colorp palette;
+ png_structp png_write_ptr =
+ png_create_write_struct(PNG_LIBPNG_VER_STRING,
+ (png_voidp)NULL,
+ /* we would need to point to error handlers
+ here to do it properly */
+ (png_error_ptr)NULL, (png_error_ptr)NULL);
+ png_infop info_ptr = png_create_info_struct(png_write_ptr);
+
+ if (setjmp(gdPngJmpbufStruct.jmpbuf)) {
+ png_destroy_write_struct(&png_write_ptr, &info_ptr);
+ return;
+ }
+
+ palette = (png_colorp)png_malloc (png_write_ptr,
+ im->colorsTotal*sizeof(png_color));
+ if (palette == NULL){
+ png_destroy_write_struct(&png_write_ptr, &info_ptr);
+ return;
+ }
+
+
+ png_init_io(png_write_ptr, out);
+ png_set_write_status_fn(png_write_ptr, NULL);
+ png_set_IHDR(png_write_ptr,info_ptr,
+ im->sx,im->sy,im->colorsTotal > 16 ? 8:4,
+ PNG_COLOR_TYPE_PALETTE,
+ im->interlace ? PNG_INTERLACE_ADAM7: PNG_INTERLACE_NONE ,
+ PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);
+ for(i=0;i<im->colorsTotal;i++){
+ palette[i].red = im->red[i];
+ palette[i].green = im->green[i];
+ palette[i].blue = im->blue[i];
+ }
+ png_set_PLTE(png_write_ptr, info_ptr, palette, im->colorsTotal);
+
+ /* choose between speed (1) and space (9) optimisation */
+ /* we want to be fast ... */
+ png_set_compression_level(png_write_ptr,1);
+ png_set_filter(png_write_ptr,PNG_FILTER_TYPE_BASE,PNG_NO_FILTERS);
+ /* store file info */
+ png_write_info(png_write_ptr, info_ptr);
+ png_set_packing(png_write_ptr);
+ png_write_image(png_write_ptr, im->pixels);
+ png_write_end(png_write_ptr, info_ptr);
+ png_free(png_write_ptr, palette);
+ png_destroy_write_struct(&png_write_ptr, &info_ptr);
+}
+
+
+
+
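A hedged usage sketch for the new gdImagePng() routine, using stock gd calls for image creation and drawing; the output file name and the drawing itself are illustrative:

    /* Sketch: write a small palette image through gdImagePng() above. */
    #include <stdio.h>
    #include <gd.h>

    extern void gdImagePng(gdImagePtr im, FILE *out);

    int write_demo_png(void)
    {
        gdImagePtr im = gdImageCreate(100, 50);
        gdImageColorAllocate(im, 255, 255, 255);   /* first color = background */
        int red = gdImageColorAllocate(im, 255, 0, 0);
        FILE *out = fopen("out.png", "wb");        /* file name is hypothetical */

        if (out == NULL) {
            gdImageDestroy(im);
            return -1;
        }
        gdImageLine(im, 0, 0, 99, 49, red);
        gdImagePng(im, out);                       /* fast zlib level 1 output */
        fclose(out);
        gdImageDestroy(im);
        return 0;
    }
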
Modified: trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_error.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_error.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_error.c Sat Jul 13 19:22:32 2002
@@ -1,5 +1,5 @@
/*****************************************************************************
- * RRDTOOL 0.99.31 Copyright Tobias Oetiker, 1997, 1998, 1999
+ * RRDtool Copyright Tobias Oetiker, 1997, 1998, 1999
*****************************************************************************
* rrd_error.c Common Header File
*****************************************************************************
@@ -16,13 +16,14 @@
void
rrd_set_error(char *fmt, ...)
{
- int maxlen = strlen(fmt)*4;
+ static char buffer[4096];
va_list argp;
rrd_clear_error();
- rrd_error = malloc(sizeof(char)*maxlen);
va_start(argp, fmt);
- vsprintf(rrd_error, fmt, argp);
+ vsprintf(buffer, fmt, argp);
va_end(argp);
+ rrd_error = malloc(sizeof(char)*(strlen(buffer)+1));
+ strcpy(rrd_error, buffer);
}
int
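The hunk above replaces the old strlen(fmt)*4 size guess with a fixed static buffer: the message is formatted first and then copied into an allocation of exactly the right length. A hedged usage sketch of the error interface from a caller's point of view, assuming the rrd_get_error()/rrd_clear_error() accessors that rrd_error.c also provides:

    /* Sketch: setting, reading and clearing the shared rrd error string. */
    #include <stdio.h>

    extern void rrd_set_error(char *fmt, ...);
    extern char *rrd_get_error(void);
    extern void rrd_clear_error(void);

    static int check_span(long start, long end)
    {
        if (end <= start) {
            rrd_set_error("end time (%ld) must be after start time (%ld)",
                          end, start);
            return -1;
        }
        return 0;
    }

    static void report(void)
    {
        fprintf(stderr, "ERROR: %s\n", rrd_get_error());
        rrd_clear_error();
    }
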
Modified: trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_fetch.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_fetch.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_fetch.c Sat Jul 13 19:22:32 2002
@@ -1,5 +1,5 @@
/*****************************************************************************
- * RRDTOOL 0.99.31 Copyright Tobias Oetiker, 1997, 1998, 1999
+ * RRDtool Copyright Tobias Oetiker, 1997, 1998, 1999
*****************************************************************************
* rrd_fetch.c read data from an rrd to use for further processing
*****************************************************************************
@@ -24,32 +24,16 @@
{
- long step_tmp = 1, start_tmp = -24*3600, end_tmp=time(NULL);
+ long step_tmp =1;
+ time_t start_tmp=0, end_tmp=0;
enum cf_en cf_idx;
-#ifdef WANT_AT_STYLE_TIMESPEC
struct time_value start_tv, end_tv;
char *parsetime_error = NULL;
- int start_tmp_is_ok = 0,
- end_tmp_is_ok = 0;
- end_tv.type = ABSOLUTE_TIME;
- end_tv.tm = *localtime(&end_tmp);
- end_tv.offset = 0;
-
- start_tv.type = RELATIVE_TO_END_TIME;
- start_tv.tm = *localtime(&end_tmp); /* to init tm_zone and tm_gmtoff */
- start_tv.offset = -24*3600;/* to be compatible with the original code. */
- start_tv.tm.tm_sec = 0; /** alternatively we could set tm_mday to -1 */
- start_tv.tm.tm_min = 0; /** but this would yield -23(25) hours offset */
- start_tv.tm.tm_hour = 0; /** twice a year, when DST is coming in or */
- start_tv.tm.tm_mday = 0; /** out of effect */
- start_tv.tm.tm_mon = 0;
- start_tv.tm.tm_year = 0;
- start_tv.tm.tm_wday = 0;
- start_tv.tm.tm_yday = 0;
- start_tv.tm.tm_isdst = -1; /* for mktime to guess */
-#endif
+ /* init start and end time */
+ parsetime("end-24h", &start_tv);
+ parsetime("now", &end_tv);
while (1){
static struct option long_options[] =
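With the defaults above, a fetch with no -s/-e options now covers the range end-24h .. now, and both options accept at-style time specifications. A sketch of a caller exercising this, assuming the argc/argv-style rrd_fetch() prototype is unchanged by this commit; the file name demo.rrd is hypothetical:

    /* Sketch: fetch the last 24 hours of AVERAGE data via at-style specs. */
    #include <stdio.h>
    #include <time.h>

    typedef double rrd_value_t;
    extern int rrd_fetch(int argc, char **argv, time_t *start, time_t *end,
                         unsigned long *step, unsigned long *ds_cnt,
                         char ***ds_namv, rrd_value_t **data);

    int fetch_last_day(void)
    {
        char *argv[] = { "fetch", "demo.rrd", "AVERAGE",
                         "-s", "end-24h", "-e", "now" };
        time_t start, end;
        unsigned long step, ds_cnt;
        char **ds_namv;
        rrd_value_t *data;

        if (rrd_fetch(7, argv, &start, &end, &step, &ds_cnt,
                      &ds_namv, &data) == -1)
            return -1;
        printf("%lu data sources, step %lus, %ld..%ld\n",
               ds_cnt, step, (long)start, (long)end);
        return 0;
    }
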
@@ -69,45 +53,16 @@
switch(opt) {
case 's':
-#ifdef WANT_AT_STYLE_TIMESPEC
- {
- char *endp;
- start_tmp_is_ok = 0;
- start_tmp = strtol(optarg, &endp, 0);
- if (*endp == '\0') /* it was a valid number */
- if (start_tmp > 31122038 || /* 31 Dec 2038 in DDMMYYYY */
- start_tmp < 0) {
- start_tmp_is_ok = 1;
- break;
- }
if ((parsetime_error = parsetime(optarg, &start_tv))) {
rrd_set_error( "start time: %s", parsetime_error );
return -1;
- }
- }
-#else
- start_tmp = atol(optarg);
-#endif
+ }
break;
case 'e':
-#ifdef WANT_AT_STYLE_TIMESPEC
- {
- char *endp;
- end_tmp_is_ok = 0;
- end_tmp = strtol(optarg, &endp, 0);
- if (*endp == '\0') /* it was a valid number */
- if (end_tmp > 31122038) { /* 31 Dec 2038 in DDMMYYYY */
- end_tmp_is_ok = 1;
- break;
- }
if ((parsetime_error = parsetime(optarg, &end_tv))) {
rrd_set_error( "end time: %s", parsetime_error );
return -1;
- }
- }
-#else
- end_tmp = atol(optarg);
-#endif
+ }
break;
case 'r':
step_tmp = atol(optarg);
@@ -117,56 +72,12 @@
return(-1);
}
}
-#ifdef WANT_AT_STYLE_TIMESPEC
- if ((start_tv.type == RELATIVE_TO_END_TIME ||
- (start_tmp_is_ok && start_tmp < 0)) && /* same as the line above */
- end_tv.type == RELATIVE_TO_START_TIME) {
- rrd_set_error("the start and end times cannot be specified "
- "relative to each other");
- return(-1);
- }
-
- if (start_tv.type == RELATIVE_TO_START_TIME) {
- rrd_set_error("the start time cannot be specified relative to itself");
- return(-1);
- }
-
- if (end_tv.type == RELATIVE_TO_END_TIME) {
- rrd_set_error("the end time cannot be specified relative to itself");
- return(-1);
- }
-
- /* We don't care to keep all the values in their range,
- mktime will do this for us */
- if (start_tv.type == RELATIVE_TO_END_TIME) {
- if (end_tmp_is_ok)
- end_tv.tm = *localtime( &end_tmp );
- start_tv.tm.tm_sec += end_tv.tm.tm_sec;
- start_tv.tm.tm_min += end_tv.tm.tm_min;
- start_tv.tm.tm_hour += end_tv.tm.tm_hour;
- start_tv.tm.tm_mday += end_tv.tm.tm_mday;
- start_tv.tm.tm_mon += end_tv.tm.tm_mon;
- start_tv.tm.tm_year += end_tv.tm.tm_year;
- }
- if (end_tv.type == RELATIVE_TO_START_TIME) {
- if (start_tmp_is_ok)
- start_tv.tm = *localtime( &start_tmp );
- end_tv.tm.tm_sec += start_tv.tm.tm_sec;
- end_tv.tm.tm_min += start_tv.tm.tm_min;
- end_tv.tm.tm_hour += start_tv.tm.tm_hour;
- end_tv.tm.tm_mday += start_tv.tm.tm_mday;
- end_tv.tm.tm_mon += start_tv.tm.tm_mon;
- end_tv.tm.tm_year += start_tv.tm.tm_year;
- }
- if (!start_tmp_is_ok)
- start_tmp = mktime(&start_tv.tm) + start_tv.offset;
- if (!end_tmp_is_ok)
- end_tmp = mktime(&end_tv.tm) + end_tv.offset;
-#endif
- if (start_tmp <= 0)
- start_tmp = end_tmp + start_tmp;
+ if (proc_start_end(&start_tv,&end_tv,&start_tmp,&end_tmp) == -1){
+ return -1;
+ }
+
if (start_tmp < 3600*24*365*10){
rrd_set_error("the first entry to fetch should be after 1980");
@@ -247,7 +158,9 @@
fclose(in_file);
return(-1);
}
- strncpy((*ds_namv)[i],rrd.ds_def[i].ds_nam,DS_NAM_SIZE);
+ strncpy((*ds_namv)[i],rrd.ds_def[i].ds_nam,DS_NAM_SIZE-1);
+ (*ds_namv)[i][DS_NAM_SIZE-1]='\0';
+
}
/* find the rra which best matches the requirements */
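The strncpy() change above applies the usual copy-then-terminate idiom, since strncpy() leaves the destination unterminated when the source fills the whole field. In isolation the pattern looks like this (NAME_SIZE stands in for DS_NAM_SIZE):

    /* Copy at most N-1 bytes and terminate explicitly. */
    #include <string.h>

    #define NAME_SIZE 20

    static void copy_name(char dst[NAME_SIZE], const char *src)
    {
        strncpy(dst, src, NAME_SIZE - 1);
        dst[NAME_SIZE - 1] = '\0';    /* guaranteed even on truncation */
    }
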
@@ -262,8 +175,8 @@
- (rrd.rra_def[i].pdp_cnt
* rrd.rra_def[i].row_cnt
* rrd.stat_head->pdp_step));
-
- full_match = *start - *end;
+
+ full_match = *end -*start;
/* best full match */
if(cal_end >= *end
&& cal_start <= *start){
Added: trunk/orca/packages/rrdtool-1.0.7.2/src/gifsize.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/gifsize.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/gifsize.c Sat Jul 13 19:22:32 2002
@@ -0,0 +1,196 @@
+/****************************************************************************
+ * RRDtool Copyright Tobias Oetiker, 1997,1998, 1999
+ ****************************************************************************
+ * gifsize.c provides the function gifsize which determines the size of a gif
+ ****************************************************************************/
+
+/* This is built from code originally created by: */
+
+/* +-------------------------------------------------------------------+ */
+/* | Copyright 1990, 1991, 1993, David Koblas. (koblas at netcom.com) | */
+/* | Permission to use, copy, modify, and distribute this software | */
+/* | and its documentation for any purpose and without fee is hereby | */
+/* | granted, provided that the above copyright notice appear in all | */
+/* | copies and that both that copyright notice and this permission | */
+/* | notice appear in supporting documentation. This software is | */
+/* | provided "as is" without express or implied warranty. | */
+/* +-------------------------------------------------------------------+ */
+
+
+#include <stdio.h>
+#include <math.h>
+#include <string.h>
+#include <stdlib.h>
+
+
+#define MAXCOLORMAPSIZE 256
+
+#define TRUE 1
+#define FALSE 0
+
+#define CM_RED 0
+#define CM_GREEN 1
+#define CM_BLUE 2
+
+
+#define LOCALCOLORMAP 0x80
+#define BitSet(byte, bit) (((byte) & (bit)) == (bit))
+
+#define ReadOK(file,buffer,len) (fread(buffer, len, 1, file) != 0)
+
+#define LM_to_uint(a,b) (((b)<<8)|(a))
+
+
+static struct {
+ int transparent;
+ int delayTime;
+ int inputFlag;
+ int disposal;
+} Gif89 = { -1, -1, -1, 0 };
+
+static int ReadColorMap (FILE *fd, int number, unsigned char (*buffer)[256]);
+static int DoExtension (FILE *fd, int label, int *Transparent);
+static int GetDataBlock (FILE *fd, unsigned char *buf);
+
+int ZeroDataBlock;
+
+int
+GifSize(FILE *fd, long *width, long *height)
+{
+ int imageNumber;
+ int BitPixel;
+ int ColorResolution;
+ int Background;
+ int AspectRatio;
+ int Transparent = (-1);
+ unsigned char buf[16];
+ unsigned char c;
+ unsigned char ColorMap[3][MAXCOLORMAPSIZE];
+ int imageCount = 0;
+ char version[4];
+ ZeroDataBlock = FALSE;
+
+ imageNumber = 1;
+ if (! ReadOK(fd,buf,6)) {
+ return 0;
+ }
+ if (strncmp((char *)buf,"GIF",3) != 0) {
+ return 0;
+ }
+ strncpy(version, (char *)buf + 3, 3);
+ version[3] = '\0';
+
+ if ((strcmp(version, "87a") != 0) && (strcmp(version, "89a") != 0)) {
+ return 0;
+ }
+ if (! ReadOK(fd,buf,7)) {
+ return 0;
+ }
+ BitPixel = 2<<(buf[4]&0x07);
+ ColorResolution = (int) (((buf[4]&0x70)>>3)+1);
+ Background = buf[5];
+ AspectRatio = buf[6];
+
+ if (BitSet(buf[4], LOCALCOLORMAP)) { /* Global Colormap */
+ if (ReadColorMap(fd, BitPixel, ColorMap)) {
+ return 0;
+ }
+ }
+ for (;;) {
+ if (! ReadOK(fd,&c,1)) {
+ return 0;
+ }
+ if (c == ';') { /* GIF terminator */
+ if (imageCount < imageNumber) {
+ return 0;
+ }
+ }
+
+ if (c == '!') { /* Extension */
+ if (! ReadOK(fd,&c,1)) {
+ return 0;
+ }
+ DoExtension(fd, c, &Transparent);
+ continue;
+ }
+
+ if (c != ',') { /* Not a valid start character */
+ continue;
+ }
+
+ ++imageCount;
+
+ if (! ReadOK(fd,buf,9)) {
+ return 0;
+ }
+
+ (*width) = LM_to_uint(buf[4],buf[5]);
+ (*height) = LM_to_uint(buf[6],buf[7]);
+ return 1;
+ }
+}
+
+static int
+ReadColorMap(FILE *fd, int number, unsigned char (*buffer)[256])
+{
+ int i;
+ unsigned char rgb[3];
+
+
+ for (i = 0; i < number; ++i) {
+ if (! ReadOK(fd, rgb, sizeof(rgb))) {
+ return TRUE;
+ }
+ buffer[CM_RED][i] = rgb[0] ;
+ buffer[CM_GREEN][i] = rgb[1] ;
+ buffer[CM_BLUE][i] = rgb[2] ;
+ }
+
+
+ return FALSE;
+}
+
+static int
+DoExtension(FILE *fd, int label, int *Transparent)
+{
+ static unsigned char buf[256];
+
+ switch (label) {
+ case 0xf9: /* Graphic Control Extension */
+ (void) GetDataBlock(fd, (unsigned char*) buf);
+ Gif89.disposal = (buf[0] >> 2) & 0x7;
+ Gif89.inputFlag = (buf[0] >> 1) & 0x1;
+ Gif89.delayTime = LM_to_uint(buf[1],buf[2]);
+ if ((buf[0] & 0x1) != 0)
+ *Transparent = buf[3];
+
+ while (GetDataBlock(fd, (unsigned char*) buf) != 0)
+ ;
+ return FALSE;
+ default:
+ break;
+ }
+ while (GetDataBlock(fd, (unsigned char*) buf) != 0)
+ ;
+
+ return FALSE;
+}
+
+static int
+GetDataBlock(FILE *fd, unsigned char *buf)
+{
+ unsigned char count;
+
+ if (! ReadOK(fd,&count,1)) {
+ return -1;
+ }
+
+ ZeroDataBlock = count == 0;
+
+ if ((count != 0) && (! ReadOK(fd, buf, count))) {
+ return -1;
+ }
+
+ return count;
+}
+
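A short usage sketch for GifSize(), matching the prototype defined above; GifSize() returns 1 once it has read the image dimensions and 0 on any parse failure:

    /* Sketch: report the dimensions of a GIF file. */
    #include <stdio.h>

    extern int GifSize(FILE *fd, long *width, long *height);

    int print_gif_size(const char *path)
    {
        long w, h;
        FILE *fd = fopen(path, "rb");

        if (fd == NULL)
            return -1;
        if (GifSize(fd, &w, &h)) {
            printf("%s: %ldx%ld\n", path, w, h);
            fclose(fd);
            return 0;
        }
        fclose(fd);
        return -1;
    }
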
Modified: trunk/orca/packages/rrdtool-1.0.7.2/src/rrdtool.dsw
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/rrdtool.dsw (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/rrdtool.dsw Sat Jul 13 19:22:32 2002
@@ -3,7 +3,7 @@
###############################################################################
-Project: "gd"="..\gd1.2\gd.dsp" - Package Owner=<4>
+Project: "cgilib"="..\cgilib-0.4\cgilib.dsp" - Package Owner=<4>
Package=<5>
{{{
@@ -15,6 +15,36 @@
###############################################################################
+Project: "gd"="..\gd1.3\gd.dsp" - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name png
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "png"="..\libpng-1.0.3\png.dsp" - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name zlib
+ End Project Dependency
+}}}
+
+###############################################################################
+
Project: "rrd"=".\rrd.dsp" - Package Owner=<4>
Package=<5>
@@ -23,6 +53,27 @@
Package=<4>
{{{
+ Begin Project Dependency
+ Project_Dep_Name gd
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "rrd_cgi"=".\rrd_cgi.dsp" - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name cgilib
+ End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name rrd
+ End Project Dependency
}}}
###############################################################################
@@ -31,6 +82,21 @@
Package=<5>
{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name rrd
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "zlib"="..\zlib-1.1.3\zlib.dsp" - Package Owner=<4>
+
+Package=<5>
+{{{
}}}
Package=<4>
Modified: trunk/orca/packages/rrdtool-1.0.7.2/src/Makefile.in
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/Makefile.in (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/Makefile.in Sat Jul 13 19:22:32 2002
@@ -1,54 +1,468 @@
-# things that the GNU standards document suggests all makefiles
-# should have.
-SHELL = /bin/sh
-.SUFFIXES:
-.SUFFIXES: .c .o .pl .pm .pod .html .man
-
-# variables we got from configure
-# (you can mess with these, if you want)
+# Makefile.in generated automatically by automake 1.4 from Makefile.am
+# Copyright (C) 1994, 1995-8, 1999 Free Software Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+#AUTOMAKE_OPTIONS = foreign
+#
+#ACLOCAL_M4 = $(top_srcdir)/config/aclocal.m4
+#AUTOHEADER = @AUTOHEADER@ --localdir=$(top_srcdir)/config
+
+
+SHELL = @SHELL@
+
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+VPATH = @srcdir@
+prefix = @prefix@
+exec_prefix = @exec_prefix@
+
+bindir = @bindir@
+sbindir = @sbindir@
+libexecdir = @libexecdir@
+datadir = @datadir@
+sysconfdir = @sysconfdir@
+sharedstatedir = @sharedstatedir@
+localstatedir = @localstatedir@
+libdir = @libdir@
+infodir = @infodir@
+mandir = @mandir@
+includedir = @includedir@
+oldincludedir = /usr/include
+
+DESTDIR =
+
+pkgdatadir = $(datadir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+
+top_builddir = ..
+
+ACLOCAL = @ACLOCAL@
+AUTOCONF = @AUTOCONF@
+AUTOMAKE = @AUTOMAKE@
+AUTOHEADER = @AUTOHEADER@
+
+INSTALL = @INSTALL@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@ $(AM_INSTALL_PROGRAM_FLAGS)
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+transform = @program_transform_name@
+
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_alias = @build_alias@
+build_triplet = @build@
+host_alias = @host_alias@
+host_triplet = @host@
+target_alias = @target_alias@
+target_triplet = @target@
CC = @CC@
-AR = ar
-CFLAGS_EXTRA = @CFLAGS_EXTRA@
CFLAGS = @CFLAGS@
-CFLAGS_ALL = $(CFLAGS) $(CFLAGS_EXTRA) -I.. -I../gd1.2 -DHAVE_CONFIG_H \
- -DWANT_AT_STYLE_TIMESPEC
-LDFLAGS = @LDFLAGS@
-LIBS = -L../gd1.2 -lgd @LIBS@
-LIBS_DEPEND = ../gd1.2/libgd.a
+COMP_PERL = @COMP_PERL@
+CPP = @CPP@
+LIBTOOL = @LIBTOOL@
+PERL = @PERL@
+RANLIB = @RANLIB@
+
+CGI_LIB_DIR = $(top_srcdir)/@CGI_LIB_DIR@
+GD_LIB_DIR = $(top_srcdir)/@GD_LIB_DIR@
+PNG_LIB_DIR = $(top_srcdir)/@PNG_LIB_DIR@
+ZLIB_LIB_DIR = $(top_srcdir)/@ZLIB_LIB_DIR@
+
+INCLUDES = -I$(CGI_LIB_DIR) -I$(GD_LIB_DIR) -I$(PNG_LIB_DIR) -I$(ZLIB_LIB_DIR)
+
+#COMPILE = $(CC) $(DEFS) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) $(CFLAGS_EXTRA)
+#LTCOMPILE = $(LIBTOOL) --mode=compile $(CC) $(DEFS) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) $(CFLAGS_EXTRA)
+#LINK = $(LIBTOOL) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(CFLAGS_EXTRA) $(LDFLAGS) -o $@
-###
-### Things you might NOT want to play with ...
-###
+RRD_C_FILES = gdpng.c getopt.c getopt1.c gifsize.c parsetime.c pngsize.c rrd_create.c rrd_diff.c rrd_dump.c rrd_error.c rrd_fetch.c rrd_format.c rrd_graph.c rrd_last.c rrd_open.c rrd_resize.c rrd_restore.c rrd_tune.c rrd_update.c getopt.h ntconfig.h parsetime.h rrd_format.h rrd_tool.h
-HEADERS = rrd_tool.h rrd_format.h getopt.h parsetime.h
-SRCLIB = rrd_fetch.c rrd_create.c rrd_dump.c rrd_graph.c rrd_tune.c\
- rrd_open.c rrd_diff.c rrd_last.c rrd_update.c rrd_format.c rrd_error.c\
- getopt.c getopt1.c parsetime.c rrd_resize.c
+# Build two libraries. One is a public one that gets installed in
+# $prefix/lib. Libtool does not create an archive of the PIC compiled
+# objects for this library type. The second library is a private one
+# meant to build the RRDs.so for perl-shared. In this case libtool
+# creates a ./.lib/*.al file that contains the PIC compiled object
+# files.
-SRC = $(SRCLIB) rrd_tool.c
+RRD_LIBS = $(CGI_LIB_DIR)/librrd_cgi.la $(GD_LIB_DIR)/librrd_gd.la $(PNG_LIB_DIR)/librrd_png.la $(ZLIB_LIB_DIR)/librrd_z.la
-OBJ = $(SRC:.c=.o)
-OBJLIB = $(SRCLIB:.c=.o)
-all: rrdtool librrd.a
+lib_LTLIBRARIES = librrd.la
+noinst_LTLIBRARIES = librrd_private.la
-rrdtool: $(OBJ) $(LIBS_DEPEND)
- $(CC) -o rrdtool $(OBJ) $(LDFLAGS) $(LIBS)
+librrd_la_SOURCES = $(RRD_C_FILES)
+librrd_private_la_SOURCES = $(RRD_C_FILES)
-librrd.a: $(OBJLIB)
- $(AR) rc librrd.a $(OBJLIB)
- @RANLIB@ librrd.a
+librrd_la_LIBADD = $(RRD_LIBS)
+librrd_private_la_LIBADD = $(RRD_LIBS)
+librrd_la_LDFLAGS = -version-info 0:0:0
-rrdtool.pure: $(OBJ) $(LIBS_DEPEND)
- purify $(CC) -o rrdtool.pure $(OBJ) $(LDFLAGS) $(LIBS)
+bin_PROGRAMS = rrdcgi rrdtool
-clean:
- -rm *.o *.a
+rrdcgi_SOURCES = rrd_cgi.c
+rrdcgi_LDADD = librrd.la
-realclean: clean
- -rm rrdtool rrdtool.pure
+rrdtool_SOURCES = rrd_tool.c
+rrdtool_LDADD = librrd.la
-.c.o: $(HEADERS)
- $(CC) $(CFLAGS_ALL) -c $<
+EXTRA_DIST = rrdtool.dsp rrdtool.dsw
+mkinstalldirs = $(SHELL) $(top_srcdir)/config/mkinstalldirs
+CONFIG_HEADER = ../config/config.h
+CONFIG_CLEAN_FILES =
+LTLIBRARIES = $(lib_LTLIBRARIES) $(noinst_LTLIBRARIES)
+
+
+DEFS = @DEFS@ -I. -I$(srcdir) -I../config
+CPPFLAGS = @CPPFLAGS@
+LDFLAGS = @LDFLAGS@
+LIBS = @LIBS@
+librrd_la_DEPENDENCIES = $(CGI_LIB_DIR)/librrd_cgi.la \
+$(GD_LIB_DIR)/librrd_gd.la $(PNG_LIB_DIR)/librrd_png.la \
+$(ZLIB_LIB_DIR)/librrd_z.la
+librrd_la_OBJECTS = gdpng.lo getopt.lo getopt1.lo gifsize.lo \
+parsetime.lo pngsize.lo rrd_create.lo rrd_diff.lo rrd_dump.lo \
+rrd_error.lo rrd_fetch.lo rrd_format.lo rrd_graph.lo rrd_last.lo \
+rrd_open.lo rrd_resize.lo rrd_restore.lo rrd_tune.lo rrd_update.lo
+librrd_private_la_LDFLAGS =
+librrd_private_la_DEPENDENCIES = $(CGI_LIB_DIR)/librrd_cgi.la \
+$(GD_LIB_DIR)/librrd_gd.la $(PNG_LIB_DIR)/librrd_png.la \
+$(ZLIB_LIB_DIR)/librrd_z.la
+librrd_private_la_OBJECTS = gdpng.lo getopt.lo getopt1.lo gifsize.lo \
+parsetime.lo pngsize.lo rrd_create.lo rrd_diff.lo rrd_dump.lo \
+rrd_error.lo rrd_fetch.lo rrd_format.lo rrd_graph.lo rrd_last.lo \
+rrd_open.lo rrd_resize.lo rrd_restore.lo rrd_tune.lo rrd_update.lo
+PROGRAMS = $(bin_PROGRAMS)
+
+rrdcgi_OBJECTS = rrd_cgi.o
+rrdcgi_DEPENDENCIES = librrd.la
+rrdcgi_LDFLAGS =
+rrdtool_OBJECTS = rrd_tool.o
+rrdtool_DEPENDENCIES = librrd.la
+rrdtool_LDFLAGS =
+COMPILE = $(CC) $(DEFS) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --mode=compile $(CC) $(DEFS) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(LDFLAGS) -o $@
+DIST_COMMON = Makefile.am Makefile.in
+
+
+PACKAGE = @PACKAGE@
+VERSION = @VERSION@
+
+DISTFILES = $(DIST_COMMON) $(SOURCES) $(HEADERS) $(TEXINFOS) $(EXTRA_DIST)
+
+TAR = tar
+GZIP_ENV = --best
+SOURCES = $(librrd_la_SOURCES) $(librrd_private_la_SOURCES) $(rrdcgi_SOURCES) $(rrdtool_SOURCES)
+OBJECTS = $(librrd_la_OBJECTS) $(librrd_private_la_OBJECTS) $(rrdcgi_OBJECTS) $(rrdtool_OBJECTS)
+
+all: all-redirect
+.SUFFIXES:
+.SUFFIXES: .S .c .lo .o .s
+$(srcdir)/Makefile.in: Makefile.am $(top_srcdir)/configure.in $(ACLOCAL_M4)
+ cd $(top_srcdir) && $(AUTOMAKE) --gnu --include-deps src/Makefile
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
+
+
+mostlyclean-libLTLIBRARIES:
+
+clean-libLTLIBRARIES:
+ -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES)
+
+distclean-libLTLIBRARIES:
+
+maintainer-clean-libLTLIBRARIES:
+
+install-libLTLIBRARIES: $(lib_LTLIBRARIES)
+ @$(NORMAL_INSTALL)
+ $(mkinstalldirs) $(DESTDIR)$(libdir)
+ @list='$(lib_LTLIBRARIES)'; for p in $$list; do \
+ if test -f $$p; then \
+ echo "$(LIBTOOL) --mode=install $(INSTALL) $$p $(DESTDIR)$(libdir)/$$p"; \
+ $(LIBTOOL) --mode=install $(INSTALL) $$p $(DESTDIR)$(libdir)/$$p; \
+ else :; fi; \
+ done
+
+uninstall-libLTLIBRARIES:
+ @$(NORMAL_UNINSTALL)
+ list='$(lib_LTLIBRARIES)'; for p in $$list; do \
+ $(LIBTOOL) --mode=uninstall rm -f $(DESTDIR)$(libdir)/$$p; \
+ done
+
+mostlyclean-noinstLTLIBRARIES:
+
+clean-noinstLTLIBRARIES:
+ -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
+
+distclean-noinstLTLIBRARIES:
+
+maintainer-clean-noinstLTLIBRARIES:
+
+.c.o:
+ $(COMPILE) -c $<
+
+.s.o:
+ $(COMPILE) -c $<
+
+.S.o:
+ $(COMPILE) -c $<
+
+mostlyclean-compile:
+ -rm -f *.o core *.core
+
+clean-compile:
+
+distclean-compile:
+ -rm -f *.tab.c
+
+maintainer-clean-compile:
+
+.c.lo:
+ $(LIBTOOL) --mode=compile $(COMPILE) -c $<
+
+.s.lo:
+ $(LIBTOOL) --mode=compile $(COMPILE) -c $<
+
+.S.lo:
+ $(LIBTOOL) --mode=compile $(COMPILE) -c $<
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+
+distclean-libtool:
+
+maintainer-clean-libtool:
+
+librrd.la: $(librrd_la_OBJECTS) $(librrd_la_DEPENDENCIES)
+ $(LINK) -rpath $(libdir) $(librrd_la_LDFLAGS) $(librrd_la_OBJECTS) $(librrd_la_LIBADD) $(LIBS)
+
+librrd_private.la: $(librrd_private_la_OBJECTS) $(librrd_private_la_DEPENDENCIES)
+ $(LINK) $(librrd_private_la_LDFLAGS) $(librrd_private_la_OBJECTS) $(librrd_private_la_LIBADD) $(LIBS)
+
+mostlyclean-binPROGRAMS:
+
+clean-binPROGRAMS:
+ -test -z "$(bin_PROGRAMS)" || rm -f $(bin_PROGRAMS)
+
+distclean-binPROGRAMS:
+
+maintainer-clean-binPROGRAMS:
+
+install-binPROGRAMS: $(bin_PROGRAMS)
+ @$(NORMAL_INSTALL)
+ $(mkinstalldirs) $(DESTDIR)$(bindir)
+ @list='$(bin_PROGRAMS)'; for p in $$list; do \
+ if test -f $$p; then \
+ echo " $(LIBTOOL) --mode=install $(INSTALL_PROGRAM) $$p $(DESTDIR)$(bindir)/`echo $$p|sed 's/$(EXEEXT)$$//'|sed '$(transform)'|sed 's/$$/$(EXEEXT)/'`"; \
+ $(LIBTOOL) --mode=install $(INSTALL_PROGRAM) $$p $(DESTDIR)$(bindir)/`echo $$p|sed 's/$(EXEEXT)$$//'|sed '$(transform)'|sed 's/$$/$(EXEEXT)/'`; \
+ else :; fi; \
+ done
+
+uninstall-binPROGRAMS:
+ @$(NORMAL_UNINSTALL)
+ list='$(bin_PROGRAMS)'; for p in $$list; do \
+ rm -f $(DESTDIR)$(bindir)/`echo $$p|sed 's/$(EXEEXT)$$//'|sed '$(transform)'|sed 's/$$/$(EXEEXT)/'`; \
+ done
+
+rrdcgi: $(rrdcgi_OBJECTS) $(rrdcgi_DEPENDENCIES)
+ @rm -f rrdcgi
+ $(LINK) $(rrdcgi_LDFLAGS) $(rrdcgi_OBJECTS) $(rrdcgi_LDADD) $(LIBS)
+
+rrdtool: $(rrdtool_OBJECTS) $(rrdtool_DEPENDENCIES)
+ @rm -f rrdtool
+ $(LINK) $(rrdtool_LDFLAGS) $(rrdtool_OBJECTS) $(rrdtool_LDADD) $(LIBS)
+
+tags: TAGS
+
+ID: $(HEADERS) $(SOURCES) $(LISP)
+ list='$(SOURCES) $(HEADERS)'; \
+ unique=`for i in $$list; do echo $$i; done | \
+ awk ' { files[$$0] = 1; } \
+ END { for (i in files) print i; }'`; \
+ here=`pwd` && cd $(srcdir) \
+ && mkid -f$$here/ID $$unique $(LISP)
+
+TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) $(LISP)
+ tags=; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS)'; \
+ unique=`for i in $$list; do echo $$i; done | \
+ awk ' { files[$$0] = 1; } \
+ END { for (i in files) print i; }'`; \
+ test -z "$(ETAGS_ARGS)$$unique$(LISP)$$tags" \
+ || (cd $(srcdir) && etags $(ETAGS_ARGS) $$tags $$unique $(LISP) -o $$here/TAGS)
+
+mostlyclean-tags:
+
+clean-tags:
+
+distclean-tags:
+ -rm -f TAGS ID
+
+maintainer-clean-tags:
+
+distdir = $(top_builddir)/$(PACKAGE)-$(VERSION)/$(subdir)
+
+subdir = src
+
+distdir: $(DISTFILES)
+ @for file in $(DISTFILES); do \
+ d=$(srcdir); \
+ if test -d $$d/$$file; then \
+ cp -pr $$d/$$file $(distdir)/$$file; \
+ else \
+ test -f $(distdir)/$$file \
+ || ln $$d/$$file $(distdir)/$$file 2> /dev/null \
+ || cp -p $$d/$$file $(distdir)/$$file || :; \
+ fi; \
+ done
+gdpng.lo gdpng.o : gdpng.c ../libpng-1.0.3/png.h ../zlib-1.1.3/zlib.h \
+ ../zlib-1.1.3/zconf.h ../libpng-1.0.3/pngconf.h ../gd1.3/gd.h
+getopt.lo getopt.o : getopt.c ../config/config.h
+getopt1.lo getopt1.o : getopt1.c ../config/config.h getopt.h
+gifsize.lo gifsize.o : gifsize.c
+parsetime.lo parsetime.o : parsetime.c rrd_tool.h ../config/config.h \
+ parsetime.h getopt.h rrd_format.h ../gd1.3/gd.h
+pngsize.lo pngsize.o : pngsize.c ../libpng-1.0.3/png.h \
+ ../zlib-1.1.3/zlib.h ../zlib-1.1.3/zconf.h \
+ ../libpng-1.0.3/pngconf.h
+rrd_cgi.o: rrd_cgi.c rrd_tool.h ../config/config.h parsetime.h getopt.h \
+ rrd_format.h ../gd1.3/gd.h ../cgilib-0.4/cgi.h
+rrd_create.lo rrd_create.o : rrd_create.c rrd_tool.h ../config/config.h \
+ parsetime.h getopt.h rrd_format.h ../gd1.3/gd.h
+rrd_diff.lo rrd_diff.o : rrd_diff.c rrd_tool.h ../config/config.h \
+ parsetime.h getopt.h rrd_format.h ../gd1.3/gd.h
+rrd_dump.lo rrd_dump.o : rrd_dump.c rrd_tool.h ../config/config.h \
+ parsetime.h getopt.h rrd_format.h ../gd1.3/gd.h
+rrd_error.lo rrd_error.o : rrd_error.c rrd_tool.h ../config/config.h \
+ parsetime.h getopt.h rrd_format.h ../gd1.3/gd.h
+rrd_fetch.lo rrd_fetch.o : rrd_fetch.c rrd_tool.h ../config/config.h \
+ parsetime.h getopt.h rrd_format.h ../gd1.3/gd.h
+rrd_format.lo rrd_format.o : rrd_format.c rrd_tool.h ../config/config.h \
+ parsetime.h getopt.h rrd_format.h ../gd1.3/gd.h
+rrd_graph.lo rrd_graph.o : rrd_graph.c rrd_tool.h ../config/config.h \
+ parsetime.h getopt.h rrd_format.h ../gd1.3/gd.h \
+ ../gd1.3/gdlucidan10.h ../gd1.3/gdlucidab12.h
+rrd_last.lo rrd_last.o : rrd_last.c rrd_tool.h ../config/config.h \
+ parsetime.h getopt.h rrd_format.h ../gd1.3/gd.h
+rrd_open.lo rrd_open.o : rrd_open.c rrd_tool.h ../config/config.h \
+ parsetime.h getopt.h rrd_format.h ../gd1.3/gd.h
+rrd_resize.lo rrd_resize.o : rrd_resize.c rrd_tool.h ../config/config.h \
+ parsetime.h getopt.h rrd_format.h ../gd1.3/gd.h
+rrd_restore.lo rrd_restore.o : rrd_restore.c rrd_tool.h \
+ ../config/config.h parsetime.h getopt.h rrd_format.h \
+ ../gd1.3/gd.h
+rrd_tool.o: rrd_tool.c rrd_tool.h ../config/config.h parsetime.h \
+ getopt.h rrd_format.h ../gd1.3/gd.h
+rrd_tune.lo rrd_tune.o : rrd_tune.c rrd_tool.h ../config/config.h \
+ parsetime.h getopt.h rrd_format.h ../gd1.3/gd.h
+rrd_update.lo rrd_update.o : rrd_update.c rrd_tool.h ../config/config.h \
+ parsetime.h getopt.h rrd_format.h ../gd1.3/gd.h
+
+info-am:
+info: info-am
+dvi-am:
+dvi: dvi-am
+check-am: all-am
+check: check-am
+installcheck-am:
+installcheck: installcheck-am
+install-exec-am: install-libLTLIBRARIES install-binPROGRAMS
+install-exec: install-exec-am
+
+install-data-am:
+install-data: install-data-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+install: install-am
+uninstall-am: uninstall-libLTLIBRARIES uninstall-binPROGRAMS
+uninstall: uninstall-am
+all-am: Makefile $(LTLIBRARIES) $(PROGRAMS)
+all-redirect: all-am
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) AM_INSTALL_PROGRAM_FLAGS=-s install
+installdirs:
+ $(mkinstalldirs) $(DESTDIR)$(libdir) $(DESTDIR)$(bindir)
+
+
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -rm -f Makefile $(CONFIG_CLEAN_FILES)
+ -rm -f config.cache config.log stamp-h stamp-h[0-9]*
+
+maintainer-clean-generic:
+mostlyclean-am: mostlyclean-libLTLIBRARIES \
+ mostlyclean-noinstLTLIBRARIES mostlyclean-compile \
+ mostlyclean-libtool mostlyclean-binPROGRAMS \
+ mostlyclean-tags mostlyclean-generic
+
+mostlyclean: mostlyclean-am
+
+clean-am: clean-libLTLIBRARIES clean-noinstLTLIBRARIES clean-compile \
+ clean-libtool clean-binPROGRAMS clean-tags \
+ clean-generic mostlyclean-am
+
+clean: clean-am
+
+distclean-am: distclean-libLTLIBRARIES distclean-noinstLTLIBRARIES \
+ distclean-compile distclean-libtool \
+ distclean-binPROGRAMS distclean-tags distclean-generic \
+ clean-am
+ -rm -f libtool
+
+distclean: distclean-am
+
+maintainer-clean-am: maintainer-clean-libLTLIBRARIES \
+ maintainer-clean-noinstLTLIBRARIES \
+ maintainer-clean-compile maintainer-clean-libtool \
+ maintainer-clean-binPROGRAMS maintainer-clean-tags \
+ maintainer-clean-generic distclean-am
+ @echo "This command is intended for maintainers to use;"
+ @echo "it deletes files that may require special tools to rebuild."
+
+maintainer-clean: maintainer-clean-am
+
+.PHONY: mostlyclean-libLTLIBRARIES distclean-libLTLIBRARIES \
+clean-libLTLIBRARIES maintainer-clean-libLTLIBRARIES \
+uninstall-libLTLIBRARIES install-libLTLIBRARIES \
+mostlyclean-noinstLTLIBRARIES distclean-noinstLTLIBRARIES \
+clean-noinstLTLIBRARIES maintainer-clean-noinstLTLIBRARIES \
+mostlyclean-compile distclean-compile clean-compile \
+maintainer-clean-compile mostlyclean-libtool distclean-libtool \
+clean-libtool maintainer-clean-libtool mostlyclean-binPROGRAMS \
+distclean-binPROGRAMS clean-binPROGRAMS maintainer-clean-binPROGRAMS \
+uninstall-binPROGRAMS install-binPROGRAMS tags mostlyclean-tags \
+distclean-tags clean-tags maintainer-clean-tags distdir info-am info \
+dvi-am dvi check check-am installcheck-am installcheck install-exec-am \
+install-exec install-data-am install-data install-am install \
+uninstall-am uninstall all-redirect all-am all installdirs \
+mostlyclean-generic distclean-generic clean-generic \
+maintainer-clean-generic clean mostlyclean distclean maintainer-clean
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
Modified: trunk/orca/packages/rrdtool-1.0.7.2/src/getopt.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/getopt.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/getopt.c Sat Jul 13 19:22:32 2002
@@ -198,12 +198,14 @@
/* Value of POSIXLY_CORRECT environment variable. */
static char *posixly_correct;
+/* we must include string as there are warnings without it ... */
+#include <string.h>
+
#ifdef __GNU_LIBRARY__
/* We want to avoid inclusion of string.h with non-GNU libraries
because there are many ways it can cause trouble.
On some systems, it contains special magic macros that don't work
in GCC. */
-#include <string.h>
#define my_index strchr
#else
@@ -645,7 +647,7 @@
optarg = nameend + 1;
else
{
- if (opterr)
+ if (opterr) {
if (argv[optind - 1][1] == '-')
/* --option */
fprintf (stderr,
@@ -656,7 +658,7 @@
fprintf (stderr,
_("%s: option `%c%s' doesn't allow an argument\n"),
argv[0], argv[optind - 1][0], pfound->name);
-
+ }
nextchar += strlen (nextchar);
optopt = pfound->val;
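The braces added around the if (opterr) body above keep the inner if/else pair attached to the long-/short-option test rather than dangling from the nearest if. A minimal illustration of the fixed form:

    /* Without the outer braces, the `else` would bind to `if (long_form)`
     * only by accident of indentation; the braces make the pairing explicit. */
    #include <stdio.h>

    static void warn(int opterr, int long_form)
    {
        if (opterr) {
            if (long_form)
                fprintf(stderr, "option `--x' doesn't allow an argument\n");
            else
                fprintf(stderr, "option `-x' doesn't allow an argument\n");
        }
    }
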
Modified: trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_format.h
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_format.h (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_format.h Sat Jul 13 19:22:33 2002
@@ -1,5 +1,5 @@
/*****************************************************************************
- * RRDTOOL 0.99.31 Copyright Tobias Oetiker, 1997, 1998, 1999
+ * RRDtool Copyright Tobias Oetiker, 1997, 1998, 1999
*****************************************************************************
* rrd_format.h RRD Database Format header
*****************************************************************************/
@@ -22,13 +22,19 @@
#if defined(WIN32) || defined(_HPUX_SOURCE)
#define DNAN ((double)fmod(0.0,0.0))
+#define DINF ((double)log(0.0))
#else
#define DNAN ((double)(0.0/0.0)) /* we use a DNAN to
* represent the UNKNOWN
* */
+#define DINF ((double)(1.0/0.0)) /* we use a DINF to
+ * represent a value at the upper or
+ * lower border of the graph ...
+ * */
#endif
+
typedef double rrd_value_t; /* the data storage type is
* double */
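DNAN marks an UNKNOWN sample and DINF a value pinned to the graph border, as the comments above explain. A small sketch of the intended checks, using the same constant definitions; clamp_sample() is illustrative only:

    /* Unknown samples stay NaN; values above the plot area become DINF. */
    #include <math.h>

    #define DNAN ((double)(0.0/0.0))
    #define DINF ((double)(1.0/0.0))

    static double clamp_sample(double v, double top)
    {
        if (isnan(v))        /* UNKNOWN stays unknown */
            return DNAN;
        if (v > top)         /* pin to the upper border of the graph */
            return DINF;
        return v;
    }
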
@@ -268,6 +274,7 @@
pdp_prep_t *pdp_prep; /* pdp data prep area */
cdp_prep_t *cdp_prep; /* cdp prep area */
rra_ptr_t *rra_ptr; /* list of rra pointers */
+ rrd_value_t *rrd_value; /* list of rrd values */
} rrd_t;
/****************************************************************************
Modified: trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_resize.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_resize.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_resize.c Sat Jul 13 19:22:33 2002
@@ -1,5 +1,5 @@
/*****************************************************************************
- * RRDTOOL 0.99.31 Copyright Tobias Oetiker, 1997, 1998, 1999
+ * RRDtool Copyright Tobias Oetiker, 1997, 1998, 1999
*****************************************************************************
* rrd_resize.c Alters size of an RRA
*****************************************************************************
Modified: trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_graph.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_graph.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_graph.c Sat Jul 13 19:22:33 2002
@@ -1,13 +1,17 @@
/****************************************************************************
- * RRDTOOL 0.99.31 Copyright Tobias Oetiker, 1997,1998, 1999
+ * RRDtool Copyright Tobias Oetiker, 1997,1998, 1999
****************************************************************************
* rrd__graph.c make creates ne rrds
****************************************************************************/
#include "rrd_tool.h"
#include <gd.h>
-#include <gdfonts.h>
-#include <gdfontmb.h>
+#include <gdlucidan10.h>
+#include <gdlucidab12.h>
+#include <sys/stat.h>
+
+#define SmallFont gdLucidaNormal10
+#define LargeFont gdLucidaBold12
/* #define DEBUG */
@@ -26,10 +30,15 @@
enum gf_en {GF_PRINT=0,GF_GPRINT,GF_COMMENT,GF_HRULE,GF_VRULE,GF_LINE1,
GF_LINE2,GF_LINE3,GF_AREA,GF_STACK, GF_DEF, GF_CDEF };
-enum op_en {OP_NUMBER=0,OP_VARIABLE,OP_ADD,OP_SUB,OP_MUL,OP_DIV,OP_SIN,
+enum op_en {OP_NUMBER=0,OP_VARIABLE,OP_INF,OP_NEGINF,
+ OP_UNKN,OP_NOW,OP_TIME,OP_ADD,OP_MOD,
+ OP_SUB,OP_MUL,
+ OP_DIV,OP_SIN, OP_DUP, OP_EXC, OP_POP,
OP_COS,OP_LOG,OP_EXP,OP_LT,OP_LE,OP_GT,OP_GE,OP_EQ,OP_IF,
OP_UN,OP_END};
+enum if_en {IF_GIF=0,IF_PNG=1};
+
typedef struct rpnp_t {
enum op_en op;
double val; /* value for a OP_NUMBER */
@@ -131,8 +140,11 @@
/* this structure describes the elements which can make up a graph.
because they are quite diverse, not all elements will use all the
possible parts of the structure. */
-
-#define FMT_LEG_LEN 120
+#ifdef HAVE_SNPRINTF
+#define FMT_LEG_LEN 200
+#else
+#define FMT_LEG_LEN 2000
+#endif
typedef struct graph_desc_t {
enum gf_en gf; /* graphing function */
@@ -145,13 +157,13 @@
col_trip_t col; /* graph color */
char format[FMT_LEG_LEN+5]; /* format for PRINT AND GPRINT */
char legend[FMT_LEG_LEN+5]; /* legend*/
- gdPoint legloc; /* location of legend */
+ gdPoint legloc; /* location of legend */
double yrule; /* value for y rule line */
time_t xrule; /* value for x rule line */
- rpnp_t *rpnp; /* instructions for CDEF function */
+ rpnp_t *rpnp; /* instructions for CDEF function */
/* description of data fetched for the graph element */
- time_t start,end; /* first and last data element */
+ time_t start,end; /* timestamps for first and last data element */
unsigned long step; /* time between samples */
unsigned long ds_cnt; /* how many data sources are there in the fetch */
long data_first; /* first pointer to this data */
@@ -161,14 +173,16 @@
} graph_desc_t;
+#define ALTYGRID 0x01 /* use alternative y grid algorithm */
+#define ALTAUTOSCALE 0x02 /* use alternative algorithm to find lower and upper bounds */
typedef struct image_desc_t {
/* configuration of graph */
- char graphfile[255]; /* filename for graphic */
+ char graphfile[1024]; /* filename for graphic */
long xsize,ysize; /* graph area size in pixels */
- col_trip_t graph_col[__GRC_END__]; /* real colors for the graph */
+ col_trip_t graph_col[__GRC_END__]; /* real colors for the graph */
char ylegend[200]; /* legend along the yaxis */
char title[200]; /* title for graph */
xlab_t xlab_user; /* user defined labeling for xaxis */
@@ -181,19 +195,25 @@
rrd_value_t minval,maxval; /* extreme values in the data */
int rigid; /* do not expand range even with
values outside */
+ char* imginfo; /* construct an <IMG ... tag and return
+ as first retval */
+ int lazy; /* only update the gif if there is reasonable
+ probability that the existing one is out of date */
int logarithmic; /* scale the yaxis logarithmic */
-
+ enum if_en imgformat; /* image format */
+
/* status information */
-
+
long xorigin,yorigin;/* where is (0,0) of the graph */
long xgif,ygif; /* total size of the gif */
int interlaced; /* will the graph be interlaced? */
double magfact; /* numerical magnitude*/
long base; /* 1000 or 1024 depending on what we graph */
char symbol; /* magnitude symbol for y-axis */
-
+ int extra_flags; /* flags for boolean options */
/* data elements */
+ long prt_c; /* number of print elements */
long gdes_c; /* number of graphics elements */
graph_desc_t *gdes; /* points to an array of graph elements */
@@ -202,7 +222,7 @@
-/* translate time values into x coordinates */
+/* translate time values into x coordinates */
/*#define xtr(x) (int)((double)im->xorigin \
+ ((double) im->xsize / (double)(im->end - im->start) ) \
* ((double)(x) - im->start)+0.5) */
@@ -283,6 +303,14 @@
return (-1);
}
+enum if_en if_conv(char *string){
+
+ conv_if(GIF,IF_GIF)
+ conv_if(PNG,IF_PNG)
+
+ return (-1);
+}
+
enum tmt_en tmt_conv(char *string){
conv_if(SECOND,TMT_SECOND)
@@ -336,31 +364,74 @@
return 0;
}
+/* find SI magnitude symbol for the given number*/
+void
+auto_scale(
+ image_desc_t *im, /* image description */
+ double *value,
+ char **symb_ptr,
+ double *magfact
+ )
+{
+
+ char *symbol[] = {"a", /* 10e-18 Atto */
+ "f", /* 10e-15 Femto */
+ "p", /* 10e-12 Pico */
+ "n", /* 10e-9 Nano */
+ "u", /* 10e-6 Micro */
+ "m", /* 10e-3 Milli */
+ " ", /* Base */
+ "k", /* 10e3 Kilo */
+ "M", /* 10e6 Mega */
+ "G", /* 10e9 Giga */
+ "T", /* 10e12 Terra */
+ "P", /* 10e15 Peta */
+ "E"};/* 10e18 Exa */
+
+ int symbcenter = 6;
+ int index;
+
+ if (*value == 0.0 || isnan(*value) ) {
+ index = 0;
+ *magfact = 1.0;
+ } else {
+ index = floor(log(fabs(*value))/log(im->base));
+ *magfact = pow(im->base, index);
+ (*value) /= (*magfact);
+ }
+ if ( index <= symbcenter && index >= -symbcenter) {
+ (*symb_ptr) = symbol[index+symbcenter];
+ }
+ else {
+ (*symb_ptr) = "?";
+ }
+}
+
+
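The new auto_scale() above picks the SI prefix that keeps the printed mantissa between 1 and the base (1000 or 1024). For readers skimming the patch, here is a minimal standalone sketch of the same idea; the names scale_si and si_prefix and the demo value are invented for illustration and are not part of the patch.

/* Minimal standalone sketch of the SI-prefix selection done by auto_scale()
 * above (illustration only; names are not from the patch). */
#include <math.h>
#include <stdio.h>

static const char *si_prefix[] =
    {"a","f","p","n","u","m"," ","k","M","G","T","P","E"};

static double scale_si(double value, double base, const char **symb)
{
    int center = 6;                        /* index of the empty prefix */
    int idx;
    if (value == 0.0 || isnan(value)) {
        *symb = si_prefix[center];
        return value;
    }
    idx = (int)floor(log(fabs(value)) / log(base));
    if (idx < -center || idx > center) {   /* outside the prefix table */
        *symb = "?";
        return value;
    }
    *symb = si_prefix[idx + center];
    return value / pow(base, idx);
}

int main(void)
{
    const char *symb;
    double v = scale_si(1234567.0, 1000.0, &symb);
    printf("%.2f %s\n", v, symb);          /* prints "1.23 M" */
    return 0;
}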
/* find SI magnitude symbol for the numbers on the y-axis*/
void
si_unit(
image_desc_t *im /* image description */
)
{
-
+
char symbol[] = {'a', /* 10e-18 Atto */
- 'f', /* 10e-15 Femto */
- 'p', /* 10e-12 Pico */
- 'n', /* 10e-9 Nano */
- 'µ', /* 10e-6 Micro */
- 'm', /* 10e-3 Milli */
- ' ', /* Base */
- 'k', /* 10e3 Kilo */
- 'M', /* 10e6 Mega */
- 'G', /* 10e9 Giga */
- 'T', /* 10e12 Terra */
- 'P', /* 10e15 Peta */
- 'E'};/* 10e18 Exa */
+ 'f', /* 10e-15 Femto */
+ 'p', /* 10e-12 Pico */
+ 'n', /* 10e-9 Nano */
+ 'u', /* 10e-6 Micro */
+ 'm', /* 10e-3 Milli */
+ ' ', /* Base */
+ 'k', /* 10e3 Kilo */
+ 'M', /* 10e6 Mega */
+ 'G', /* 10e9 Giga */
+ 'T', /* 10e12 Tera */
+ 'P', /* 10e15 Peta */
+ 'E'};/* 10e18 Exa */
int symbcenter = 6;
double digits;
-
digits = floor( log( max( fabs(im->minval),fabs(im->maxval)))/log(im->base));
im->magfact = pow(im->base , digits);
#ifdef DEBUG
@@ -388,36 +459,57 @@
0.8,0.7,0.6,0.5,0.4,0.3,0.2,0.1,0.0,-1};
double scaled_min,scaled_max;
+ double adj;
int i;
- scaled_min = im->minval / im->magfact;
- scaled_max = im->maxval / im->magfact;
-
+
#ifdef DEBUG
printf("Min: %6.2f Max: %6.2f MagFactor: %6.2f\n",
im->minval,im->maxval,im->magfact);
#endif
-
- for (i=1; sensiblevalues[i] > 0; i++){
- if (sensiblevalues[i-1]>=scaled_min &&
- sensiblevalues[i]<=scaled_min)
- im->minval = sensiblevalues[i]*(im->magfact);
-
- if (-sensiblevalues[i-1]<=scaled_min &&
- -sensiblevalues[i]>=scaled_min)
- im->minval = -sensiblevalues[i-1]*(im->magfact);
-
- if (sensiblevalues[i-1] >= scaled_max &&
- sensiblevalues[i] <= scaled_max)
- im->maxval = sensiblevalues[i-1]*(im->magfact);
-
- if (-sensiblevalues[i-1]<=scaled_max &&
- -sensiblevalues[i] >=scaled_max)
- im->maxval = -sensiblevalues[i]*(im->magfact);
+
+ if (isnan(im->ygridstep)){
+ if(im->extra_flags & ALTAUTOSCALE) {
+ /* measure the amplitude of the function. Make sure that
+ graph boundaries are slightly higher than max/min vals
+ so we can see amplitude on the graph */
+ adj = (im->maxval - im->minval) * 0.1;
+ im->minval -= adj;
+ im->maxval += adj;
+ }
+ else {
+ scaled_min = im->minval / im->magfact;
+ scaled_max = im->maxval / im->magfact;
+
+ for (i=1; sensiblevalues[i] > 0; i++){
+ if (sensiblevalues[i-1]>=scaled_min &&
+ sensiblevalues[i]<=scaled_min)
+ im->minval = sensiblevalues[i]*(im->magfact);
+
+ if (-sensiblevalues[i-1]<=scaled_min &&
+ -sensiblevalues[i]>=scaled_min)
+ im->minval = -sensiblevalues[i-1]*(im->magfact);
+
+ if (sensiblevalues[i-1] >= scaled_max &&
+ sensiblevalues[i] <= scaled_max)
+ im->maxval = sensiblevalues[i-1]*(im->magfact);
+
+ if (-sensiblevalues[i-1]<=scaled_max &&
+ -sensiblevalues[i] >=scaled_max)
+ im->maxval = -sensiblevalues[i]*(im->magfact);
+ }
+ }
+ } else {
+ /* adjust min and max to the grid definition if there is one */
+ im->minval = (double)im->ylabfact * im->ygridstep *
+ floor(im->minval / ((double)im->ylabfact * im->ygridstep));
+ im->maxval = (double)im->ylabfact * im->ygridstep *
+ ceil(im->maxval /( (double)im->ylabfact * im->ygridstep));
}
+
#ifdef DEBUG
- printf("SCALED Min: %6.2f Max: %6.2f Factor: %6.2f\n",
+ fprintf(stderr,"SCALED Min: %6.2f Max: %6.2f Factor: %6.2f\n",
im->minval,im->maxval,im->magfact);
#endif
}
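In the new else branch above (a user-supplied y-grid), the limits are simply snapped outward to whole label steps of ylabfact * ygridstep so that every drawn label lands on a gridline. A small hedged illustration with invented values:

/* Hedged illustration of the grid-snapping branch above (values invented):
 * with ygridstep = 10 and ylabfact = 2 the label step is 20, so
 * minval 13 -> 20*floor(13/20) = 0 and maxval 57 -> 20*ceil(57/20) = 60. */
#include <math.h>
#include <stdio.h>

int main(void)
{
    double ygridstep = 10.0, ylabfact = 2.0;
    double minval = 13.0, maxval = 57.0;
    double labstep = ylabfact * ygridstep;

    minval = labstep * floor(minval / labstep);
    maxval = labstep * ceil(maxval / labstep);
    printf("snapped range: [%.0f, %.0f]\n", minval, maxval);   /* [0, 60] */
    return 0;
}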
@@ -431,38 +523,46 @@
time_t *start,
time_t *end, /* which time frame do you want ?
* will be changed to represent reality */
- unsigned long *step, /* step the data currently is in */
+ unsigned long *step, /* desired step size. Will be adjusted to new increased step size */
unsigned long *ds_cnt, /* number of data sources in file */
rrd_value_t **data) /* two dimensional array containing the data */
{
- int i,reduce_factor = ceil((double)*step / cur_step);
- unsigned long src_row,trg_row,col,row_cnt,start_offset,rowadjust;
- row_cnt = (*end-*start)/cur_step;
+ int i,reduce_factor = ceil((double)(*step) / (double)cur_step);
+ unsigned long src_row,trg_row,col,row_cnt,start_offset,skiprows=0;
- *step = cur_step*reduce_factor;
+ (*step) = cur_step*reduce_factor; /* set new step size for reduced data */
/* adjust the start time so that it is a multiple of the new steptime */
+
+ row_cnt = ((*end)-(*start))/cur_step+1; /* +1 because start and end are pointers to first and last entry */
- start_offset = *start % *step;
+ start_offset = (*start) % (*step);
+ /* move the start pointer into the past so that the reduced data set
+ fully covers the timespan of the new dataset */
+ (*start) -= start_offset;
+
+ trg_row=0;
+ /* skip the first <skiprows> rows of original data because they are already covered
+ by the row of *UNKNOWN* data added to the reduced dataset */
+
+ skiprows = ((*step) - start_offset) / cur_step;
+
if (start_offset > 0) {
- start_offset = *step - start_offset;
- *start = *start + start_offset - *step;
- /* we don't have any data for the first row, so fill it with
- *UNKNOWN* */
- for (col=0;col<*ds_cnt;col++)
+ /* we don't have full data for the first row, so we'll ditch
+ what is there and fill it with *UNKNOWN* */
+ for (col=0;col<(*ds_cnt);col++) {
(*data)[col] = DNAN;
- }
-
-
- rowadjust = start_offset / cur_step;
-
- trg_row=0;
+ }
+ trg_row++; /* one row of target filled already */
+
+ };
+
- for (src_row = rowadjust; src_row <= row_cnt-reduce_factor; src_row+=reduce_factor) {
- for (col=0;col<*ds_cnt;col++){
+ for (src_row = skiprows; src_row < row_cnt; src_row+=reduce_factor) {
+ for (col=0;col<(*ds_cnt);col++){
double newval=0;
unsigned long validval=0;
- for (i=0;i<reduce_factor && src_row<row_cnt;i += reduce_factor) {
- unsigned long ptr = (src_row+i)* *ds_cnt+col;
+ for (i=0;i<reduce_factor && src_row+i<row_cnt;i++) {
+ unsigned long ptr = (src_row+i)* (*ds_cnt)+col;
if (isnan((*data)[ptr])) continue; /* we can't help with NAN */
validval++;
switch (cf) {
@@ -491,12 +591,13 @@
break;
}
}
- (*data)[(trg_row)* *ds_cnt+col] = newval;
+ (*data)[(trg_row)* (*ds_cnt)+col] = newval;
}
trg_row++;
}
- *end = *start+ *step *trg_row;
- for (col=0;col<*ds_cnt;col++){
+ *end = (*start) + (*step) * (trg_row);
+ /* make sure there is some NAN at the end of the graph */
+ for (col=0;col<(*ds_cnt);col++){
(*data)[(trg_row)* *ds_cnt+col] = DNAN;
}
}
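The reworked reduce_data() above consolidates reduce_factor source rows into one target row per data source, skipping NaN cells while it accumulates. The sketch below restates that inner loop for a single data-source column and the CF_AVERAGE case only; the function name and demo data are invented for illustration.

/* Illustrative reduction of one column by averaging groups of
 * `reduce_factor` samples while skipping NaN cells (sketch, not the patch). */
#include <math.h>
#include <stdio.h>

static void reduce_average(const double *src, int row_cnt, int reduce_factor,
                           double *dst, int *dst_cnt)
{
    int src_row, i, trg = 0;
    for (src_row = 0; src_row < row_cnt; src_row += reduce_factor) {
        double sum = 0.0;
        int valid = 0;
        for (i = 0; i < reduce_factor && src_row + i < row_cnt; i++) {
            if (isnan(src[src_row + i]))
                continue;                  /* we can't help with NaN */
            sum += src[src_row + i];
            valid++;
        }
        dst[trg++] = valid ? sum / valid : NAN;
    }
    *dst_cnt = trg;
}

int main(void)
{
    double src[] = {1, 2, NAN, 4, 5, 6};
    double dst[3];
    int i, n;
    reduce_average(src, 6, 2, dst, &n);
    for (i = 0; i < n; i++)
        printf("%g ", dst[i]);             /* 1.5 4 5.5 */
    printf("\n");
    return 0;
}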
@@ -629,22 +730,16 @@
rpnp=NULL;
while(*expr){
- if ((rpnp = (rpnp_t *) realloc(rpnp, (++steps + 2)*
+ if ((rpnp = (rpnp_t *) rrd_realloc(rpnp, (++steps + 2)*
sizeof(rpnp_t)))==NULL){
return NULL;
}
-
- if(sscanf(expr,"%lf%n",&rpnp[steps].val,&pos) == 1){
+
+ else if(sscanf(expr,"%lf%n",&rpnp[steps].val,&pos) == 1){
rpnp[steps].op = OP_NUMBER;
expr+=pos;
}
- else if ((sscanf(expr,"%29[_A-Za-z0-9]%n",
- vname,&pos) == 1)
- && ((rpnp[steps].ptr = find_var(im,vname)) != -1)){
- rpnp[steps].op = OP_VARIABLE;
- expr+=pos;
- }
-
+
#define match_op(VV,VVV) \
else if (strncmp(expr, #VVV, strlen(#VVV))==0){ \
rpnp[steps].op = VV; \
@@ -655,20 +750,38 @@
match_op(OP_SUB,-)
match_op(OP_MUL,*)
match_op(OP_DIV,/)
+ match_op(OP_MOD,%)
match_op(OP_SIN,SIN)
match_op(OP_COS,COS)
match_op(OP_LOG,LOG)
match_op(OP_EXP,EXP)
+ match_op(OP_DUP,DUP)
+ match_op(OP_EXC,EXC)
+ match_op(OP_POP,POP)
match_op(OP_LT,LT)
match_op(OP_LE,LE)
match_op(OP_GT,GT)
match_op(OP_GE,GE)
match_op(OP_EQ,EQ)
match_op(OP_IF,IF)
+ /* order is important here! match the longest operator name first */
+ match_op(OP_UNKN,UNKN)
match_op(OP_UN,UN)
-
+ match_op(OP_NEGINF,NEGINF)
+ match_op(OP_INF,INF)
+ match_op(OP_NOW,NOW)
+ match_op(OP_TIME,TIME)
+
#undef match_op
+
+ else if ((sscanf(expr,"%29[_A-Za-z0-9]%n",
+ vname,&pos) == 1)
+ && ((rpnp[steps].ptr = find_var(im,vname)) != -1)){
+ rpnp[steps].op = OP_VARIABLE;
+ expr+=pos;
+ }
+
else {
free(rpnp);
return NULL;
@@ -687,7 +800,7 @@
}
-#define dc_stacksize 30
+#define dc_stackblock 100
/* run the rpn calculator on all the CDEF arguments */
@@ -699,6 +812,8 @@
long *steparray;
int stepcnt;
time_t now;
+ double *stack = NULL;
+ long dc_stacksize = 0;
for (gdi=0;gdi<im->gdes_c;gdi++){
/* only GF_CDEF elements are of interest */
@@ -720,8 +835,9 @@
for(rpi=0;im->gdes[gdi].rpnp[rpi].op != OP_END;rpi++){
if(im->gdes[gdi].rpnp[rpi].op == OP_VARIABLE){
long ptr = im->gdes[gdi].rpnp[rpi].ptr;
- if ((steparray = realloc(steparray, (++stepcnt+1)*sizeof(double)))==NULL){
+ if ((steparray = rrd_realloc(steparray, (++stepcnt+1)*sizeof(double)))==NULL){
rrd_set_error("realloc steparray");
+ free(stack);
return -1;
};
@@ -751,6 +867,7 @@
}
if(steparray == NULL){
rrd_set_error("rpn expressions without variables are not supported");
+ free(stack);
return -1;
}
steparray[stepcnt]=0;
@@ -767,6 +884,7 @@
/ im->gdes[gdi].step +1)
* sizeof(double)))==NULL){
rrd_set_error("malloc im->gdes[gdi].data");
+ free(stack);
return -1;
}
@@ -774,23 +892,22 @@
for (now = im->gdes[gdi].start;
now<=im->gdes[gdi].end;
now += im->gdes[gdi].step){
- double stack[dc_stacksize];
- int stptr=-1;
+ long stptr=-1;
/* process each op from the rpn in turn */
for (rpi=0;im->gdes[gdi].rpnp[rpi].op != OP_END;rpi++){
- switch (im->gdes[gdi].rpnp[rpi].op){
- case OP_NUMBER:
- if(stptr>=dc_stacksize){
+ if (stptr +5 > dc_stacksize){
+ dc_stacksize += dc_stackblock;
+ stack = rrd_realloc(stack,dc_stacksize*sizeof(double));
+ if (stack==NULL){
rrd_set_error("RPN stack overflow");
return -1;
}
+ }
+ switch (im->gdes[gdi].rpnp[rpi].op){
+ case OP_NUMBER:
stack[++stptr] = im->gdes[gdi].rpnp[rpi].val;
break;
case OP_VARIABLE:
- if(stptr>=dc_stacksize){
- rrd_set_error("RPN stack overflow");
- return -1;
- }
/* make sure we pull the correct value from the *.data array */
/* adjust the pointer into the array accordingly. */
if(now > im->gdes[gdi].start &&
@@ -800,9 +917,25 @@
}
stack[++stptr] = *im->gdes[gdi].rpnp[rpi].data;
break;
+ case OP_UNKN:
+ stack[++stptr] = DNAN;
+ break;
+ case OP_INF:
+ stack[++stptr] = DINF;
+ break;
+ case OP_NEGINF:
+ stack[++stptr] = -DINF;
+ break;
+ case OP_NOW:
+ stack[++stptr] = (double)time(NULL);
+ break;
+ case OP_TIME:
+ stack[++stptr] = (double)now;
+ break;
case OP_ADD:
if(stptr<1){
rrd_set_error("RPN stack underflow");
+ free(stack);
return -1;
}
stack[stptr-1] = stack[stptr-1] + stack[stptr];
@@ -811,6 +944,7 @@
case OP_SUB:
if(stptr<1){
rrd_set_error("RPN stack underflow");
+ free(stack);
return -1;
}
stack[stptr-1] = stack[stptr-1] - stack[stptr];
@@ -819,6 +953,7 @@
case OP_MUL:
if(stptr<1){
rrd_set_error("RPN stack underflow");
+ free(stack);
return -1;
}
stack[stptr-1] = stack[stptr-1] * stack[stptr];
@@ -827,14 +962,25 @@
case OP_DIV:
if(stptr<1){
rrd_set_error("RPN stack underflow");
+ free(stack);
return -1;
}
stack[stptr-1] = stack[stptr-1] / stack[stptr];
stptr--;
break;
+ case OP_MOD:
+ if(stptr<1){
+ rrd_set_error("RPN stack underflow");
+ free(stack);
+ return -1;
+ }
+ stack[stptr-1] = fmod(stack[stptr-1],stack[stptr]);
+ stptr--;
+ break;
case OP_SIN:
if(stptr<0){
rrd_set_error("RPN stack underflow");
+ free(stack);
return -1;
}
stack[stptr] = sin(stack[stptr]);
@@ -842,6 +988,7 @@
case OP_COS:
if(stptr<0){
rrd_set_error("RPN stack underflow");
+ free(stack);
return -1;
}
stack[stptr] = cos(stack[stptr]);
@@ -849,13 +996,44 @@
case OP_LOG:
if(stptr<0){
rrd_set_error("RPN stack underflow");
+ free(stack);
return -1;
}
stack[stptr] = log(stack[stptr]);
break;
+ case OP_DUP:
+ if(stptr<0){
+ rrd_set_error("RPN stack underflow");
+ free(stack);
+ return -1;
+ }
+ stack[stptr+1] = stack[stptr];
+ stptr++;
+ break;
+ case OP_POP:
+ if(stptr<0){
+ rrd_set_error("RPN stack underflow");
+ free(stack);
+ return -1;
+ }
+ stptr--;
+ break;
+ case OP_EXC:
+ if(stptr<1){
+ rrd_set_error("RPN stack underflow");
+ free(stack);
+ return -1;
+ } else {
+ double dummy;
+ dummy = stack[stptr] ;
+ stack[stptr] = stack[stptr-1];
+ stack[stptr-1] = dummy;
+ }
+ break;
case OP_EXP:
if(stptr<0){
rrd_set_error("RPN stack underflow");
+ free(stack);
return -1;
}
stack[stptr] = exp(stack[stptr]);
@@ -863,6 +1041,7 @@
case OP_LT:
if(stptr<1){
rrd_set_error("RPN stack underflow");
+ free(stack);
return -1;
}
stack[stptr-1] = stack[stptr-1] < stack[stptr] ? 1.0 : 0.0;
@@ -871,6 +1050,7 @@
case OP_LE:
if(stptr<1){
rrd_set_error("RPN stack underflow");
+ free(stack);
return -1;
}
stack[stptr-1] = stack[stptr-1] <= stack[stptr] ? 1.0 : 0.0;
@@ -879,6 +1059,7 @@
case OP_GT:
if(stptr<1){
rrd_set_error("RPN stack underflow");
+ free(stack);
return -1;
}
stack[stptr-1] = stack[stptr-1] > stack[stptr] ? 1.0 : 0.0;
@@ -887,6 +1068,7 @@
case OP_GE:
if(stptr<1){
rrd_set_error("RPN stack underflow");
+ free(stack);
return -1;
}
stack[stptr-1] = stack[stptr-1] >= stack[stptr] ? 1.0 : 0.0;
@@ -895,6 +1077,7 @@
case OP_EQ:
if(stptr<1){
rrd_set_error("RPN stack underflow");
+ free(stack);
return -1;
}
stack[stptr-1] = stack[stptr-1] == stack[stptr] ? 1.0 : 0.0;
@@ -903,6 +1086,7 @@
case OP_IF:
if(stptr<2){
rrd_set_error("RPN stack underflow");
+ free(stack);
return -1;
}
stack[stptr-2] = stack[stptr-2] != 0.0 ? stack[stptr-1] : stack[stptr];
@@ -912,6 +1096,7 @@
case OP_UN:
if(stptr<0){
rrd_set_error("RPN stack underflow");
+ free(stack);
return -1;
}
stack[stptr] = isnan(stack[stptr]) ? 1.0 : 0.0;
@@ -922,13 +1107,16 @@
}
if(stptr!=0){
rrd_set_error("RPN final stack size != 1");
+ free(stack);
return -1;
}
im->gdes[gdi].data[++dataidx] = stack[0];
}
}
+ free(stack);
return 0;
}
+
#undef dc_stacksize
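Two things change in the CDEF evaluator above: the operator set grows (MOD, DUP, EXC, POP, UNKN, INF, NEGINF, NOW, TIME) and the fixed 30-slot stack becomes a heap block that is grown in dc_stackblock increments through rrd_realloc(). The toy evaluator below shows the same grow-on-demand pattern for two of those operators; it is an independent illustration, not the patch code.

/* Toy RPN evaluator: grow-on-demand stack plus a couple of the operators
 * added above (illustration only, not the patch code). */
#include <stdio.h>
#include <stdlib.h>

enum op { OP_NUM, OP_ADD, OP_MUL, OP_DUP, OP_END };
struct instr { enum op op; double val; };

static double rpn_eval(const struct instr *prog)
{
    double *stack = NULL;
    double result;
    long size = 0, sp = -1;
    int i;
    for (i = 0; prog[i].op != OP_END; i++) {
        if (sp + 2 > size) {               /* grow the stack block-wise */
            size += 100;
            stack = realloc(stack, size * sizeof(double));
            if (stack == NULL) { perror("realloc"); exit(1); }
        }
        switch (prog[i].op) {
        case OP_NUM: stack[++sp] = prog[i].val; break;
        case OP_DUP: stack[sp + 1] = stack[sp]; sp++; break;
        case OP_ADD: stack[sp - 1] += stack[sp]; sp--; break;
        case OP_MUL: stack[sp - 1] *= stack[sp]; sp--; break;
        default: break;
        }
    }
    result = stack[0];                     /* final stack holds one value */
    free(stack);
    return result;
}

int main(void)
{
    /* "3,DUP,*,1,+"  ->  3*3 + 1 = 10 */
    struct instr prog[] = {
        {OP_NUM, 3}, {OP_DUP, 0}, {OP_MUL, 0},
        {OP_NUM, 1}, {OP_ADD, 0}, {OP_END, 0}
    };
    printf("%g\n", rpn_eval(prog));
    return 0;
}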
/* massage data so, that we get one value for each x coordinate in the graph */
@@ -965,6 +1153,7 @@
paintval=0.0;
for(ii=0;ii<im->gdes_c;ii++){
+ double value;
switch(im->gdes[ii].gf){
case GF_LINE1:
case GF_LINE2:
@@ -973,21 +1162,27 @@
paintval = 0.0;
case GF_STACK:
vidx = im->gdes[ii].vidx;
- paintval +=
+
+ value =
im->gdes[vidx].data[
- (unsigned long)(
- (double)(gr_time
- - im->gdes[vidx].start + im->gdes[vidx].step
- )
- / (double)im->gdes[vidx].step
- )
-
- *im->gdes[vidx].ds_cnt
- +im->gdes[vidx].ds];
+ ((unsigned long)floor(
+ (gr_time - im->gdes[vidx].start )
+ / im->gdes[vidx].step)+1)
+
+ /* added one because the data was not being aligned properly;
+ this fixes it. We may also have a problem in fetch ... */
+
+ *im->gdes[vidx].ds_cnt
+ +im->gdes[vidx].ds];
+
+ if (! isnan(value)) {
+ paintval += value;
+ im->gdes[ii].p_data[i] = paintval;
+ } else {
+ im->gdes[ii].p_data[i] = DNAN;
+ }
- im->gdes[ii].p_data[i] = paintval;
-
- if (! isnan(paintval)){
+ if (finite(paintval)){
if (isnan(minval) || paintval < minval)
minval = paintval;
if (isnan(maxval) || paintval > maxval)
@@ -1053,48 +1248,48 @@
long basestep /* how many of these do we jump at a time */
)
{
- struct tm *tm;
- tm = localtime(&start);
+ struct tm tm;
+ tm = *localtime(&start);
switch(baseint){
case TMT_SECOND:
- tm->tm_sec -= tm->tm_sec % basestep; break;
+ tm.tm_sec -= tm.tm_sec % basestep; break;
case TMT_MINUTE:
- tm->tm_sec=0;
- tm->tm_min -= tm->tm_min % basestep;
+ tm.tm_sec=0;
+ tm.tm_min -= tm.tm_min % basestep;
break;
case TMT_HOUR:
- tm->tm_sec=0;
- tm->tm_min = 0;
- tm->tm_hour -= tm->tm_hour % basestep; break;
+ tm.tm_sec=0;
+ tm.tm_min = 0;
+ tm.tm_hour -= tm.tm_hour % basestep; break;
case TMT_DAY:
/* we do NOT look at the basestep for this ... */
- tm->tm_sec=0;
- tm->tm_min = 0;
- tm->tm_hour = 0; break;
+ tm.tm_sec=0;
+ tm.tm_min = 0;
+ tm.tm_hour = 0; break;
case TMT_WEEK:
/* we do NOT look at the basestep for this ... */
- tm->tm_sec=0;
- tm->tm_min = 0;
- tm->tm_hour = 0;
- tm->tm_mday -= tm->tm_wday -1 ; break; /* -1 because we want the monday */
- if (tm->tm_wday==0) tm->tm_mday -= 7; /* we want the *previous* monday */
+ tm.tm_sec=0;
+ tm.tm_min = 0;
+ tm.tm_hour = 0;
+ tm.tm_mday -= tm.tm_wday -1 ; /* -1 because we want the monday */
+ if (tm.tm_wday==0) tm.tm_mday -= 7; break; /* we want the *previous* monday */
case TMT_MONTH:
- tm->tm_sec=0;
- tm->tm_min = 0;
- tm->tm_hour = 0;
- tm->tm_mday = 1;
- tm->tm_mon -= tm->tm_mon % basestep; break;
+ tm.tm_sec=0;
+ tm.tm_min = 0;
+ tm.tm_hour = 0;
+ tm.tm_mday = 1;
+ tm.tm_mon -= tm.tm_mon % basestep; break;
case TMT_YEAR:
- tm->tm_sec=0;
- tm->tm_min = 0;
- tm->tm_hour = 0;
- tm->tm_mday = 1;
- tm->tm_mon = 0;
- tm->tm_year -= (tm->tm_year+1900) % basestep;
+ tm.tm_sec=0;
+ tm.tm_min = 0;
+ tm.tm_hour = 0;
+ tm.tm_mday = 1;
+ tm.tm_mon = 0;
+ tm.tm_year -= (tm.tm_year+1900) % basestep;
}
- return mktime(tm);
+ return mktime(&tm);
}
/* identify the point where the next gridline, label ... gets placed */
time_t
@@ -1104,27 +1299,27 @@
long basestep /* how many of these do we jump at a time */
)
{
- struct tm *tm;
+ struct tm tm;
time_t madetime;
- tm = localtime(&current);
+ tm = *localtime(&current);
do {
switch(baseint){
case TMT_SECOND:
- tm->tm_sec += basestep; break;
+ tm.tm_sec += basestep; break;
case TMT_MINUTE:
- tm->tm_min += basestep; break;
+ tm.tm_min += basestep; break;
case TMT_HOUR:
- tm->tm_hour += basestep; break;
+ tm.tm_hour += basestep; break;
case TMT_DAY:
- tm->tm_mday += basestep; break;
+ tm.tm_mday += basestep; break;
case TMT_WEEK:
- tm->tm_mday += 7*basestep; break;
+ tm.tm_mday += 7*basestep; break;
case TMT_MONTH:
- tm->tm_mon += basestep; break;
+ tm.tm_mon += basestep; break;
case TMT_YEAR:
- tm->tm_year += basestep;
+ tm.tm_year += basestep;
}
- madetime = mktime(tm);
+ madetime = mktime(&tm);
} while (madetime == -1); /* this is necessary to skip impossible times
like the daylight saving time skips */
return madetime;
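Both time-stepping hunks above stop working through localtime()'s return pointer and instead take a private struct tm copy before editing it, so that a later localtime() call cannot clobber the value being built up. A short illustration of why the copy matters:

/* Why the hunks above copy the struct tm: localtime() returns a pointer to
 * one shared static buffer, so a second call may overwrite the first result
 * (illustration only). */
#include <stdio.h>
#include <time.h>

int main(void)
{
    time_t now = time(NULL);
    time_t later = now + 24 * 3600;

    struct tm *p = localtime(&now);        /* points at the static buffer     */
    struct tm copy = *localtime(&now);     /* private copy, as the patch does */

    localtime(&later);                     /* may clobber the static buffer   */

    printf("via pointer: day %d\n", p->tm_mday);   /* may already show tomorrow */
    printf("via copy:    day %d\n", copy.tm_mday); /* still shows today         */
    return 0;
}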
@@ -1177,12 +1372,16 @@
int graphelement = 0;
long vidx;
int max_ii;
+ double magfact = -1;
+ char *si_symb = "";
+ char *percent_s;
int prlines = 1;
+ if (im->imginfo) prlines++;
for(i=0;i<im->gdes_c;i++){
switch(im->gdes[i].gf){
case GF_PRINT:
prlines++;
- if((*prdata = realloc(*prdata,prlines*sizeof(char *)))==NULL){
+ if(((*prdata) = rrd_realloc((*prdata),prlines*sizeof(char *)))==NULL){
rrd_set_error("realloc prdata");
return 0;
}
@@ -1197,14 +1396,14 @@
for(ii=im->gdes[vidx].ds;
ii < max_ii;
ii+=im->gdes[vidx].ds_cnt){
- if (isnan(im->gdes[vidx].data[ii]))
+ if (! finite(im->gdes[vidx].data[ii]))
continue;
if (isnan(printval)){
printval = im->gdes[vidx].data[ii];
validsteps++;
continue;
}
-
+
switch (im->gdes[i].cf){
case CF_AVERAGE:
validsteps++;
@@ -1220,17 +1419,45 @@
printval = im->gdes[vidx].data[ii];
}
}
- if (im->gdes[i].cf == CF_AVERAGE)
- if (validsteps > 1) {
- printval = (printval / validsteps);
- }
+ if (im->gdes[i].cf == CF_AVERAGE) {
+ if (validsteps > 1) {
+ printval = (printval / validsteps);
+ }
+ }
+ if ((percent_s = strstr(im->gdes[i].format,"%S")) != NULL) {
+ /* Magfact is set to -1 upon entry to print_calc. If it
+ * is still less than 0, then we need to run auto_scale.
+ * Otherwise, put the value into the correct units. If
+ * the value is 0, then do not set the symbol or magnification
+ * so that next time the calculation will be performed again. */
+ if (magfact < 0.0) {
+ auto_scale(im,&printval,&si_symb,&magfact);
+ if (printval == 0.0)
+ magfact = -1.0;
+ } else {
+ printval /= magfact;
+ }
+ *(++percent_s) = 's';
+ }
+ else if (strstr(im->gdes[i].format,"%s") != NULL) {
+ auto_scale(im,&printval,&si_symb,&magfact);
+ }
if (im->gdes[i].gf == GF_PRINT){
- (*prdata)[prlines-2] = malloc((strlen(im->gdes[i].format)+100)*sizeof(char));
- sprintf((*prdata)[prlines-2],im->gdes[i].format,printval);
+ (*prdata)[prlines-2] = malloc((FMT_LEG_LEN+2)*sizeof(char));
+#ifdef HAVE_SNPRINTF
+ snprintf((*prdata)[prlines-2],FMT_LEG_LEN,im->gdes[i].format,printval,si_symb);
+#else
+ sprintf((*prdata)[prlines-2],im->gdes[i].format,printval,si_symb);
+#endif
(*prdata)[prlines-1] = NULL;
} else {
/* GF_GPRINT */
- sprintf(im->gdes[i].legend,im->gdes[i].format,printval);
+
+#ifdef HAVE_SNPRINTF
+ snprintf(im->gdes[i].legend,FMT_LEG_LEN-2,im->gdes[i].format,printval,si_symb);
+#else
+ sprintf(im->gdes[i].legend,im->gdes[i].format,printval,si_symb);
+#endif
graphelement = 1;
}
break;
@@ -1242,9 +1469,10 @@
case GF_STACK:
case GF_HRULE:
case GF_VRULE:
+ graphelement = 1;
+ break;
case GF_DEF:
case GF_CDEF:
- graphelement = 1;
break;
}
}
@@ -1258,9 +1486,9 @@
{
/* graph labels */
- int interleg = gdFontSmall->w*2;
- int box = gdFontSmall->h*1.2;
- int border = gdFontSmall->w*2;
+ int interleg = SmallFont->w*2;
+ int box = SmallFont->h*1.2;
+ int border = SmallFont->w*2;
int fill=0, fill_last;
int leg_c = 0;
int leg_x = border, leg_y = im->ygif;
@@ -1289,7 +1517,7 @@
if (im->gdes[i].gf != GF_GPRINT &&
im->gdes[i].gf != GF_COMMENT)
fill += box;
- fill += leg_cc * gdFontSmall->w;
+ fill += leg_cc * SmallFont->w;
leg_c++;
}
@@ -1316,7 +1544,7 @@
leg_x = border;
if (leg_c >= 2 && prt_fctn == 'j') {
glue = (im->xgif - fill - 2* border) / (leg_c-1);
- /* if (glue > 2 * gdFontSmall->w) glue = 0; */
+ /* if (glue > 2 * SmallFont->w) glue = 0; */
} else {
glue = 0;
}
@@ -1329,21 +1557,21 @@
im->gdes[ii].legloc.x = leg_x;
im->gdes[ii].legloc.y = leg_y;
leg_x = leg_x
- + strlen(im->gdes[ii].legend)*gdFontSmall->w
+ + strlen(im->gdes[ii].legend)*SmallFont->w
+ interleg
+ glue;
if (im->gdes[ii].gf != GF_GPRINT &&
im->gdes[ii].gf != GF_COMMENT)
leg_x += box;
}
- leg_y = leg_y + gdFontSmall->h*1.2;
- if (prt_fctn == 's') leg_y += gdFontSmall->h *0.5;
+ leg_y = leg_y + SmallFont->h*1.2;
+ if (prt_fctn == 's') leg_y -= SmallFont->h *0.5;
fill = 0;
leg_c = 0;
mark = ii;
}
}
- im->ygif = leg_y+(border+3);
+ im->ygif = leg_y+6;
}
@@ -1361,7 +1589,9 @@
gdPoint polyPoints[4];
int labfact,gridind;
int styleMinor[2],styleMajor[2];
-
+ int decimals, fractionals;
+ char labfmt[64];
+
labfact=2;
gridind=-1;
range = im->maxval - im->minval;
@@ -1382,22 +1612,53 @@
/* find grid spacing */
pixel=1;
if(isnan(im->ygridstep)){
- for(i=0;ylab[i].grid > 0;i++){
- pixel = im->ysize / (scaledrange / ylab[i].grid);
- if (gridind == -1 && pixel > 5) {
- gridind = i;
- break;
+ if(im->extra_flags & ALTYGRID) {
+ /* find the value with max number of digits. Get number of digits */
+ decimals = ceil(log10(max(fabs(im->maxval), fabs(im->minval))));
+ if(decimals <= 0) /* everything is small. make place for zero */
+ decimals = 1;
+
+ fractionals = floor(log10(range));
+ if(fractionals < 0) /* small amplitude. */
+ sprintf(labfmt, "%%%d.%df", decimals - fractionals + 1, -fractionals + 1);
+ else
+ sprintf(labfmt, "%%%d.1f", decimals + 1);
+ gridstep = pow(10, fractionals);
+ if(gridstep == 0) /* range is one -> 0.1 is reasonable scale */
+ gridstep = 0.1;
+ /* should have at least 5 lines but no more than 15 */
+ if(range/gridstep < 5)
+ gridstep /= 10;
+ if(range/gridstep > 15)
+ gridstep *= 10;
+ if(range/gridstep > 5) {
+ labfact = 1;
+ if(range/gridstep > 8)
+ labfact = 2;
+ }
+ else {
+ gridstep /= 5;
+ labfact = 5;
}
}
-
- for(i=0; i<4;i++) {
- if (pixel * ylab[gridind].lfac[i] >= 2 * gdFontSmall->h) {
- labfact = ylab[gridind].lfac[i];
- break;
- }
- }
-
- gridstep = ylab[gridind].grid * im->magfact;
+ else {
+ for(i=0;ylab[i].grid > 0;i++){
+ pixel = im->ysize / (scaledrange / ylab[i].grid);
+ if (gridind == -1 && pixel > 5) {
+ gridind = i;
+ break;
+ }
+ }
+
+ for(i=0; i<4;i++) {
+ if (pixel * ylab[gridind].lfac[i] >= 2 * SmallFont->h) {
+ labfact = ylab[gridind].lfac[i];
+ break;
+ }
+ }
+
+ gridstep = ylab[gridind].grid * im->magfact;
+ }
} else {
gridstep = im->ygridstep;
labfact = im->ylabfact;
@@ -1415,7 +1676,12 @@
if(i % labfact == 0){
if (i==0 || im->symbol == ' ') {
if(scaledstep < 1){
- sprintf(graph_label,"%4.1f",scaledstep*i);
+ if(im->extra_flags & ALTYGRID) {
+ sprintf(graph_label,labfmt,scaledstep*i);
+ }
+ else {
+ sprintf(graph_label,"%4.1f",scaledstep*i);
+ }
} else {
sprintf(graph_label,"%4.0f",scaledstep*i);
}
@@ -1427,10 +1693,10 @@
}
}
- gdImageString(gif, gdFontSmall,
+ gdImageString(gif, SmallFont,
(polyPoints[0].x - (strlen(graph_label) *
- gdFontSmall->w)-7),
- polyPoints[0].y - gdFontSmall->h/2+1,
+ SmallFont->w)-7),
+ polyPoints[0].y - SmallFont->h/2+1,
graph_label, graph_col[GRC_FONT].i);
gdImageSetStyle(gif, styleMajor, 2);
@@ -1488,7 +1754,7 @@
}
pixperstep = pixpex * minstep;
if(pixperstep > 5){minoridx = i;}
- if(pixperstep > 2 * gdFontSmall->h){majoridx = i;}
+ if(pixperstep > 2 * SmallFont->h){majoridx = i;}
}
styleMinor[0] = graph_col[GRC_GRID].i;
@@ -1539,10 +1805,10 @@
gdImageLine(gif, polyPoints[0].x,polyPoints[0].y,
polyPoints[1].x,polyPoints[0].y,gdStyled);
sprintf(graph_label,"%3.0e",value * yloglab[majoridx][i]);
- gdImageString(gif, gdFontSmall,
+ gdImageString(gif, SmallFont,
(polyPoints[0].x - (strlen(graph_label) *
- gdFontSmall->w)-7),
- polyPoints[0].y - gdFontSmall->h/2+1,
+ SmallFont->w)-7),
+ polyPoints[0].y - SmallFont->h/2+1,
graph_label, graph_col[GRC_FONT].i);
}
}
@@ -1647,11 +1913,11 @@
#else
# error "your libc has no strftime I guess we'll abort the exercise here."
#endif
- width=strlen(graph_label) * gdFontSmall->w;
+ width=strlen(graph_label) * SmallFont->w;
gr_pos=xtr(im,tilab) - width/2;
if (gr_pos >= im->xorigin
&& gr_pos + width <= im->xorigin+im->xsize)
- gdImageString(gif, gdFontSmall,
+ gdImageString(gif, SmallFont,
gr_pos, polyPoints[0].y+4,
graph_label, graph_col[GRC_FONT].i);
}
@@ -1731,26 +1997,26 @@
}
if (! res) {
char *nodata = "No Data found";
- gdImageString(gif, gdFontMediumBold,
+ gdImageString(gif, LargeFont,
im->xgif/2
- - (strlen(nodata)*gdFontMediumBold->w)/2,
+ - (strlen(nodata)*LargeFont->w)/2,
(2*im->yorigin-im->ysize) / 2,
nodata, graph_col[GRC_FONT].i);
}
/* yaxis description */
- gdImageStringUp(gif, gdFontSmall,
+ gdImageStringUp(gif, SmallFont,
7,
(im->yorigin - im->ysize/2
- +(strlen(im->ylegend)*gdFontSmall->w)/2 ),
+ +(strlen(im->ylegend)*SmallFont->w)/2 ),
im->ylegend, graph_col[GRC_FONT].i);
/* graph title */
- gdImageString(gif, gdFontMediumBold,
+ gdImageString(gif, LargeFont,
im->xgif/2
- - (strlen(im->title)*gdFontMediumBold->w)/2,
+ - (strlen(im->title)*LargeFont->w)/2,
8,
im->title, graph_col[GRC_FONT].i);
@@ -1773,7 +2039,7 @@
gdImageFilledPolygon(gif,polyPoints,4,im->gdes[i].col.i);
gdImagePolygon(gif,polyPoints,4,graph_col[GRC_FRAME].i);
- gdImageString(gif, gdFontSmall,
+ gdImageString(gif, SmallFont,
polyPoints[0].x+boxH+6,
polyPoints[0].y-1,
im->gdes[i].legend,
@@ -1782,7 +2048,7 @@
polyPoints[0].x = im->gdes[i].legloc.x;
polyPoints[0].y = im->gdes[i].legloc.y;
- gdImageString(gif, gdFontSmall,
+ gdImageString(gif, SmallFont,
polyPoints[0].x,
polyPoints[0].y,
im->gdes[i].legend,
@@ -1844,22 +2110,57 @@
}
return brush;
}
+/*****************************************************
+ * lazy check: make sure we really need to create this graph
+ *****************************************************/
+
+int lazy_check(image_desc_t *im){
+ FILE *fd = NULL;
+ struct stat gifstat;
+
+ if (im->lazy == 0) return 0; /* no lazy option */
+ if (stat(im->graphfile,&gifstat) != 0)
+ return 0; /* can't stat */
+ /* one pixel in the existing graph is more than what we would
+ change here ... */
+ if (time(NULL) - gifstat.st_mtime >
+ (im->end - im->start) / im->xsize)
+ return 0;
+ if ((fd = fopen(im->graphfile,"rb")) == NULL)
+ return 0; /* the file does not exist */
+ switch (im->imgformat) {
+ case IF_GIF:
+ GifSize(fd,&(im->xgif),&(im->ygif));
+ break;
+ case IF_PNG:
+ PngSize(fd,&(im->xgif),&(im->ygif));
+ break;
+ }
+ fclose(fd);
+ return 1;
+}
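The new --lazy support above comes down to a single comparison: skip redrawing when the image on disk is younger than the number of seconds one pixel column of the graph represents. A hedged standalone restatement follows; the file name and time window are invented for the example.

/* One-pixel freshness test behind the --lazy option (illustrative values). */
#include <stdio.h>
#include <sys/stat.h>
#include <time.h>

int main(void)
{
    struct stat st;
    time_t end   = time(NULL);
    time_t start = end - 24 * 3600;        /* graph covers the last day */
    long   xsize = 400;                    /* pixels across the graph   */
    long   sec_per_pixel = (end - start) / xsize;

    if (stat("graph.gif", &st) != 0) {     /* invented file name */
        printf("no image yet: must draw\n");
        return 0;
    }
    if (time(NULL) - st.st_mtime > sec_per_pixel)
        printf("stale: regenerate the image\n");
    else
        printf("fresh enough: skip drawing\n");
    return 0;
}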
/* draw that picture thing ... */
int
graph_paint(image_desc_t *im, char ***calcpr)
{
int i,ii;
+ int lazy = lazy_check(im);
FILE *fo;
/* gif stuff */
gdImagePtr gif,brush;
double areazero = 0.0;
- enum gf_en lastgf = GF_LINE1;
+ enum gf_en stack_gf = GF_PRINT;
+ graph_desc_t *lastgdes = NULL;
gdPoint canvas[4], back[4]; /* points for canvas*/
+ /* if we are lazy and there is nothing to PRINT ... quit now */
+ if (lazy && im->prt_c==0) return 0;
+
/* pull the data from the rrd files ... */
+
if(data_fetch(im)==-1)
return -1;
@@ -1870,8 +2171,9 @@
/* calculate and PRINT and GPRINT definitions. We have to do it at
* this point because it will affect the length of the legends
* if there are no graph elements we stop here ...
+ * if we are lazy, try to quit ...
*/
- if(print_calc(im,calcpr)==0)
+ if(print_calc(im,calcpr)==0 || lazy)
return 0;
/* get actual drawing data and find min and max values*/
@@ -1890,17 +2192,17 @@
draw labels and other things outside the graph area */
- im->xorigin = 10 + 9 * gdFontSmall->w+gdFontSmall->h;
+ im->xorigin = 10 + 9 * SmallFont->w+SmallFont->h;
xtr(im,0);
im->yorigin = 14 + im->ysize;
ytr(im,DNAN);
if(im->title[0] != '\0')
- im->yorigin += (gdFontMediumBold->h+4);
+ im->yorigin += (LargeFont->h+4);
im->xgif=20+im->xsize + im->xorigin;
- im->ygif= im->yorigin+2*gdFontSmall->h;
+ im->ygif= im->yorigin+2*SmallFont->h;
/* determine where to place the legends onto the graphics.
and set im->ygif to match space requirements for text */
@@ -1985,19 +2287,30 @@
case GF_LINE2:
case GF_LINE3:
case GF_AREA:
-
- lastgf = im->gdes[i].gf;
+ stack_gf = im->gdes[i].gf;
case GF_STACK:
- if (im->gdes[i].col.i != -1){
+ /* fix data points at oo and -oo */
+ for(ii=0;ii<im->xsize;ii++){
+ if (isinf(im->gdes[i].p_data[ii])){
+ if (im->gdes[i].p_data[ii] > 0) {
+ im->gdes[i].p_data[ii] = im->maxval ;
+ } else {
+ im->gdes[i].p_data[ii] = im->minval ;
+ }
+
+ }
+ }
+
+ if (im->gdes[i].col.i != -1){
/* GF_LINE and friends */
- if(lastgf == GF_LINE1 || lastgf == GF_LINE2 || lastgf == GF_LINE3 ){
- brush = MkLineBrush(im,i,lastgf);
+ if(stack_gf == GF_LINE1 || stack_gf == GF_LINE2 || stack_gf == GF_LINE3 ){
+ brush = MkLineBrush(im,i,stack_gf);
gdImageSetBrush(gif, brush);
for(ii=1;ii<im->xsize;ii++){
if (isnan(im->gdes[i].p_data[ii-1]) ||
isnan(im->gdes[i].p_data[ii]))
continue;
- gdImageLine(gif,
+ gdImageLine(gif,
ii+im->xorigin-1,ytr(im,im->gdes[i].p_data[ii-1]),
ii+im->xorigin,ytr(im,im->gdes[i].p_data[ii]),
gdBrushed);
@@ -2009,26 +2322,33 @@
/* GF_AREA STACK type*/
if (im->gdes[i].gf == GF_STACK )
for(ii=0;ii<im->xsize;ii++){
- if ((i>0 && isnan(im->gdes[i-1].p_data[ii])) ||
- isnan(im->gdes[i].p_data[ii]))
- continue;
- gdImageLine(gif,
- ii+im->xorigin,ytr(im,im->gdes[i-1].p_data[ii]),
- ii+im->xorigin,ytr(im,im->gdes[i].p_data[ii]),
- im->gdes[i].col.i);
- }
-
- else /* simple GF_AREA */
- for(ii=0;ii<im->xsize;ii++){
- if (isnan(im->gdes[i].p_data[ii]))
+ if(isnan(im->gdes[i].p_data[ii])){
+ im->gdes[i].p_data[ii] = lastgdes->p_data[ii];
+ continue;
+ }
+
+ if (lastgdes->p_data[ii] == im->gdes[i].p_data[ii]){
+ continue;
+ }
+ gdImageLine(gif,
+ ii+im->xorigin,ytr(im,lastgdes->p_data[ii]),
+ ii+im->xorigin,ytr(im,im->gdes[i].p_data[ii]),
+ im->gdes[i].col.i);
+ }
+
+ else /* simple GF_AREA */
+ for(ii=0;ii<im->xsize;ii++){
+ if (isnan(im->gdes[i].p_data[ii])) {
+ im->gdes[i].p_data[ii] = 0;
continue;
+ }
gdImageLine(gif,
ii+im->xorigin,ytr(im,areazero),
ii+im->xorigin,ytr(im,im->gdes[i].p_data[ii]),
im->gdes[i].col.i);
}
}
-
+ lastgdes = &(im->gdes[i]);
break;
}
}
@@ -2064,15 +2384,22 @@
fo = stdout;
} else {
if ((fo = fopen(im->graphfile,"wb")) == NULL) {
- perror("rrd_graph gif write:");
rrd_set_error("cannot open %s for write",im->graphfile);
return (-1);
}
}
- gdImageGif(gif, fo);
+ switch (im->imgformat) {
+ case IF_GIF:
+ gdImageGif(gif, fo);
+ break;
+ case IF_PNG:
+ gdImagePng(gif, fo);
+ break;
+ }
if (strcmp(im->graphfile,"-") != 0)
fclose(fo);
gdImageDestroy(gif);
+
return 0;
}
@@ -2088,7 +2415,7 @@
im->gdes_c++;
- if ((im->gdes = (graph_desc_t *) realloc(im->gdes, (im->gdes_c)
+ if ((im->gdes = (graph_desc_t *) rrd_realloc(im->gdes, (im->gdes_c)
* sizeof(graph_desc_t)))==NULL){
rrd_set_error("realloc graph_descs");
return -1;
@@ -2125,11 +2452,14 @@
input[inp] != '\0';
inp++){
if (input[inp] == '\\' &&
- input[inp+1] != '\0' &&
- input[inp+1] == ':')
+ input[inp+1] != '\0' &&
+ (input[inp+1] == '\\' ||
+ input[inp+1] == ':')){
output[outp++] = input[++inp];
- else
+ }
+ else {
output[outp++] = input[inp];
+ }
}
output[outp] = '\0';
return inp;
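The widened escape handling above now unescapes both \: and \\ while copying the scanned text. An equivalent standalone copy loop, for illustration only (the unescape name is invented):

/* Standalone sketch of the \: and \\ unescaping done above (illustration). */
#include <stdio.h>

static void unescape(const char *input, char *output)
{
    int inp, outp = 0;
    for (inp = 0; input[inp] != '\0'; inp++) {
        if (input[inp] == '\\' && input[inp + 1] != '\0' &&
            (input[inp + 1] == '\\' || input[inp + 1] == ':')) {
            output[outp++] = input[++inp]; /* keep the escaped character */
        } else {
            output[outp++] = input[inp];
        }
    }
    output[outp] = '\0';
}

int main(void)
{
    char out[64];
    unescape("10:30\\:00 and a \\\\ backslash", out);
    printf("%s\n", out);   /* 10:30:00 and a \ backslash */
    return 0;
}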
@@ -2141,40 +2471,21 @@
image_desc_t im;
int i;
- long long_tmp,start_tmp,end_tmp;
+ long long_tmp;
+ time_t start_tmp=0,end_tmp=0;
char scan_gtm[12],scan_mtm[12],scan_ltm[12],col_nam[12];
char symname[100];
unsigned int col_red,col_green,col_blue;
long scancount;
int linepass = 0; /* stack can only follow directly after LINE* AREA or STACK */
-#ifdef WANT_AT_STYLE_TIMESPEC
struct time_value start_tv, end_tv;
char *parsetime_error = NULL;
- int start_tmp_is_ok = 0,
- end_tmp_is_ok = 0;
-#endif
- /* default values */
- end_tmp = time(NULL);
- start_tmp = -24*3600;
-#ifdef WANT_AT_STYLE_TIMESPEC
- end_tv.type = ABSOLUTE_TIME;
- end_tv.tm = *localtime(&end_tmp);
- end_tv.offset = 0;
-
- start_tv.type = RELATIVE_TO_END_TIME;
- start_tv.tm = *localtime(&end_tmp); /* to init tm_zone and tm_gmtoff */
- start_tv.offset = -24*3600;/* to be compatible with the original code. */
- start_tv.tm.tm_sec = 0; /** alternatively we could set tm_mday to -1 */
- start_tv.tm.tm_min = 0; /** but this would yield -23(25) hours offset */
- start_tv.tm.tm_hour = 0; /** twice a year, when DST is coming in or */
- start_tv.tm.tm_mday = 0; /** out of effect */
- start_tv.tm.tm_mon = 0;
- start_tv.tm.tm_year = 0;
- start_tv.tm.tm_wday = 0;
- start_tv.tm.tm_yday = 0;
- start_tv.tm.tm_isdst = -1; /* for mktime to guess */
-#endif
+ (*prdata)=NULL;
+
+ parsetime("end-24h", &start_tv);
+ parsetime("now", &end_tv);
+
im.xlab_user.minsec = -1;
im.xgif=0;
im.ygif=0;
@@ -2185,13 +2496,17 @@
im.minval = DNAN;
im.maxval = DNAN;
im.interlaced = 0;
+ im.extra_flags= 0;
im.rigid = 0;
+ im.imginfo = NULL;
+ im.lazy = 0;
im.logarithmic = 0;
im.ygridstep = DNAN;
im.base = 1000;
+ im.prt_c = 0;
im.gdes_c = 0;
im.gdes = NULL;
-
+ im.imgformat = IF_GIF; /* we default to GIF output */
for(i=0;i<DIM(graph_col);i++)
im.graph_col[i].red=-1;
@@ -2200,86 +2515,56 @@
while (1){
static struct option long_options[] =
{
- {"start", required_argument, 0, 's'},
- {"end", required_argument,0,'e'},
- {"x-grid", required_argument,0,'x'},
- {"y-grid", required_argument,0,'y'},
+ {"start", required_argument, 0, 's'},
+ {"end", required_argument, 0, 'e'},
+ {"x-grid", required_argument, 0, 'x'},
+ {"y-grid", required_argument, 0, 'y'},
{"vertical-label",required_argument,0,'v'},
- {"width", required_argument,0,'w'},
- {"height", required_argument,0,'h'},
- {"interlaced", no_argument,0,'i'},
- {"upper-limit",required_argument,0,'u'},
- {"lower-limit",required_argument,0,'l'},
- {"rigid", no_argument,0,'r'},
- {"base", required_argument,0,'b'},
- {"logarithmic",no_argument,0,'o'},
- {"color", required_argument,0,'c'},
- {"title", required_argument,0,'t'},
+ {"width", required_argument, 0, 'w'},
+ {"height", required_argument, 0, 'h'},
+ {"interlaced", no_argument, 0, 'i'},
+ {"upper-limit",required_argument, 0, 'u'},
+ {"lower-limit",required_argument, 0, 'l'},
+ {"rigid", no_argument, 0, 'r'},
+ {"base", required_argument, 0, 'b'},
+ {"logarithmic",no_argument, 0, 'o'},
+ {"color", required_argument, 0, 'c'},
+ {"title", required_argument, 0, 't'},
+ {"imginfo", required_argument, 0, 'f'},
+ {"imgformat", required_argument, 0, 'a'},
+ {"lazy", no_argument, 0, 'z'},
+ {"alt-y-grid", no_argument, 0, 257 },
+ {"alt-autoscale", no_argument, 0, 258 },
{0,0,0,0}};
int option_index = 0;
int opt;
opt = getopt_long(argc, argv,
- "s:e:x:y:v:w:h:iu:b:l:roc:t:",
+ "s:e:x:y:v:w:h:iu:l:rb:oc:t:f:a:z",
long_options, &option_index);
if (opt == EOF)
break;
switch(opt) {
+ case 257:
+ im.extra_flags |= ALTYGRID;
+ break;
+ case 258:
+ im.extra_flags |= ALTAUTOSCALE;
+ break;
case 's':
- if(im.gdes_c > 0){
- rrd_set_error("set start before graphing");
- im_free(&im);
- return -1;
- }
-#ifdef WANT_AT_STYLE_TIMESPEC
- {
- char *endp;
- start_tmp_is_ok = 0;
- start_tmp = strtol(optarg, &endp, 0);
- if (*endp == '\0') /* it was a valid number */
- if (start_tmp > 31122038 || /* 31 Dec 2038 in DDMMYYYY */
- start_tmp < 0) {
- start_tmp_is_ok = 1;
- break;
- }
if ((parsetime_error = parsetime(optarg, &start_tv))) {
rrd_set_error( "start time: %s", parsetime_error );
- im_free(&im);
return -1;
- }
}
-#else
- start_tmp = atol(optarg);
-#endif
break;
case 'e':
- if(im.gdes_c > 0){
- rrd_set_error("set end before graphing");
- im_free(&im);
- return -1;
- }
-#ifdef WANT_AT_STYLE_TIMESPEC
- {
- char *endp;
- end_tmp_is_ok = 0;
- end_tmp = strtol(optarg, &endp, 0);
- if (*endp == '\0') /* it was a valid number */
- if (end_tmp > 31122038) { /* 31 Dec 2038 in DDMMYYYY */
- end_tmp_is_ok = 1;
- break;
- }
if ((parsetime_error = parsetime(optarg, &end_tv))) {
rrd_set_error( "end time: %s", parsetime_error );
- im_free(&im);
return -1;
- }
}
-#else
- end_tmp = atol(optarg);
-#endif
break;
case 'x':
if(sscanf(optarg,
@@ -2294,19 +2579,19 @@
im.xlab_form) == 8){
if((im.xlab_user.gridtm = tmt_conv(scan_gtm)) == -1){
rrd_set_error("unknown keyword %s",scan_gtm);
- im_free(&im); return -1;
+ return -1;
} else if ((im.xlab_user.mgridtm = tmt_conv(scan_mtm)) == -1){
rrd_set_error("unknown keyword %s",scan_mtm);
- im_free(&im); return -1;
+ return -1;
} else if ((im.xlab_user.labtm = tmt_conv(scan_ltm)) == -1){
rrd_set_error("unknown keyword %s",scan_ltm);
- im_free(&im); return -1;
+ return -1;
}
im.xlab_user.minsec = 1;
im.xlab_user.stst = im.xlab_form;
} else {
rrd_set_error("invalid xgrid format");
- im_free(&im);return -1;
+ return -1;
}
break;
case 'y':
@@ -2316,18 +2601,19 @@
&im.ylabfact) == 2) {
if(im.ygridstep<=0){
rrd_set_error("grid step must be > 0");
- im_free(&im);return -1;
+ return -1;
} else if (im.ylabfact < 1){
rrd_set_error("label factor must be > 0");
- im_free(&im);return -1;
+ return -1;
}
} else {
rrd_set_error("invalid ygrid format");
- im_free(&im);return -1;
+ return -1;
}
break;
case 'v':
strncpy(im.ylegend,optarg,150);
+ im.ylegend[150]='\0';
break;
case 'u':
im.maxval = atof(optarg);
@@ -2339,20 +2625,14 @@
im.base = atol(optarg);
if(im.base != 1024 && im.base != 1000 ){
rrd_set_error("the only sensible value for base apart from 1000 is 1024");
- im_free(&im);
return -1;
}
break;
case 'w':
long_tmp = atol(optarg);
- if(im.gdes_c > 0){
- rrd_set_error("set width before graphing");
- im_free(&im);
- return -1;
- }
if (long_tmp < 10) {
rrd_set_error("width below 10 pixels");
- im_free(&im);return -1;
+ return -1;
}
im.xsize = long_tmp;
break;
@@ -2360,7 +2640,7 @@
long_tmp = atol(optarg);
if (long_tmp < 10) {
rrd_set_error("height below 10 pixels");
- im_free(&im); return -1;
+ return -1;
}
im.ysize = long_tmp;
break;
@@ -2370,6 +2650,18 @@
case 'r':
im.rigid = 1;
break;
+ case 'f':
+ im.imginfo = optarg;
+ break;
+ case 'a':
+ if((im.imgformat = if_conv(optarg)) == -1) {
+ rrd_set_error("unsupported graphics format '%s'",optarg);
+ return -1;
+ }
+ break;
+ case 'z':
+ im.lazy = 1;
+ break;
case 'o':
im.logarithmic = 1;
if (isnan(im.minval))
@@ -2389,121 +2681,79 @@
}
} else {
rrd_set_error("invalid color def format");
- im_free(&im);return -1;
+ return -1;
}
break;
case 't':
strncpy(im.title,optarg,150);
+ im.title[150]='\0';
break;
case '?':
rrd_set_error("unknown option '%s'",argv[optind-1]);
- return(-1);
+ return -1;
}
}
+
if (im.logarithmic == 1 && (im.minval <= 0 || isnan(im.minval))){
- rrd_set_error("for a logarithmic yaxis you must specify a lower-limit > 0");
- return(-1);
+ rrd_set_error("for a logarithmic yaxis you must specify a lower-limit > 0");
+ return -1;
}
- strncpy(im.graphfile,argv[optind],254);
+ strncpy(im.graphfile,argv[optind],sizeof(im.graphfile)-1);
+ im.graphfile[sizeof(im.graphfile)-1]='\0';
-#ifdef WANT_AT_STYLE_TIMESPEC
- if ((start_tv.type == RELATIVE_TO_END_TIME ||
- (start_tmp_is_ok && start_tmp < 0)) && /* same as the line above */
- end_tv.type == RELATIVE_TO_START_TIME) {
- rrd_set_error("the start and end times cannot be specified "
- "relative to each other");
- return(-1);
- }
-
- if (start_tv.type == RELATIVE_TO_START_TIME) {
- rrd_set_error("the start time cannot be specified relative to itself");
- return(-1);
- }
-
- if (end_tv.type == RELATIVE_TO_END_TIME) {
- rrd_set_error("the end time cannot be specified relative to itself");
- return(-1);
- }
-
- /* We don't care to keep all the values in their range,
- mktime will do this for us */
- if (start_tv.type == RELATIVE_TO_END_TIME) {
- if (end_tmp_is_ok)
- end_tv.tm = *localtime( &end_tmp );
- start_tv.tm.tm_sec += end_tv.tm.tm_sec;
- start_tv.tm.tm_min += end_tv.tm.tm_min;
- start_tv.tm.tm_hour += end_tv.tm.tm_hour;
- start_tv.tm.tm_mday += end_tv.tm.tm_mday;
- start_tv.tm.tm_mon += end_tv.tm.tm_mon;
- start_tv.tm.tm_year += end_tv.tm.tm_year;
- }
- if (end_tv.type == RELATIVE_TO_START_TIME) {
- if (start_tmp_is_ok)
- start_tv.tm = *localtime( &start_tmp );
- end_tv.tm.tm_sec += start_tv.tm.tm_sec;
- end_tv.tm.tm_min += start_tv.tm.tm_min;
- end_tv.tm.tm_hour += start_tv.tm.tm_hour;
- end_tv.tm.tm_mday += start_tv.tm.tm_mday;
- end_tv.tm.tm_mon += start_tv.tm.tm_mon;
- end_tv.tm.tm_year += start_tv.tm.tm_year;
- }
- if (!start_tmp_is_ok)
- start_tmp = mktime(&start_tv.tm) + start_tv.offset;
- if (!end_tmp_is_ok)
- end_tmp = mktime(&end_tv.tm) + end_tv.offset;
-#endif
-
- if (start_tmp < 0)
- start_tmp = end_tmp + start_tmp;
+ if (proc_start_end(&start_tv,&end_tv,&start_tmp,&end_tmp) == -1){
+ return -1;
+ }
if (start_tmp < 3600*24*365*10){
rrd_set_error("the first entry to fetch should be after 1980 (%ld)",start_tmp);
- return(-1);
+ return -1;
}
if (end_tmp < start_tmp) {
rrd_set_error("start (%ld) should be less than end (%ld)",
start_tmp, end_tmp);
- return(-1);
+ return -1;
}
im.start = start_tmp;
im.end = end_tmp;
+
for(i=optind+1;i<argc;i++){
int argstart=0;
int strstart=0;
- char varname[30],rpnex[256];
+ char varname[30],*rpnex;
gdes_alloc(&im);
if(sscanf(argv[i],"%10[A-Z0-9]:%n",symname,&argstart)==1){
- if((im.gdes[im.gdes_c-1].gf=gf_conv(symname))==-1){
- im_free(&im);
- rrd_set_error("unknown function '%s'",symname);
- return -1;
- }
+ if((im.gdes[im.gdes_c-1].gf=gf_conv(symname))==-1){
+ im_free(&im);
+ rrd_set_error("unknown function '%s'",symname);
+ return -1;
+ }
} else {
rrd_set_error("can't parse '%s'",argv[i]);
- im_free(&im);
- return -1;
- }
- /* reset linepass if a non LINE/STACK/AREA operator gets parsed */
- if (im.gdes[im.gdes_c-1].gf != GF_LINE1 &&
- im.gdes[im.gdes_c-1].gf != GF_LINE2 &&
- im.gdes[im.gdes_c-1].gf != GF_LINE3 &&
- im.gdes[im.gdes_c-1].gf != GF_AREA &&
- im.gdes[im.gdes_c-1].gf != GF_STACK) {
- linepass = 0;
+ im_free(&im);
+ return -1;
}
- /* allow \: to use : in strings */
-
-
- /* if we are still alive we got a valid graph operator */
+ /* reset linepass if a non LINE/STACK/AREA operator gets parsed
+
+ if (im.gdes[im.gdes_c-1].gf != GF_LINE1 &&
+ im.gdes[im.gdes_c-1].gf != GF_LINE2 &&
+ im.gdes[im.gdes_c-1].gf != GF_LINE3 &&
+ im.gdes[im.gdes_c-1].gf != GF_AREA &&
+ im.gdes[im.gdes_c-1].gf != GF_STACK) {
+ linepass = 0;
+ }
+ */
+
switch(im.gdes[im.gdes_c-1].gf){
- case GF_PRINT:
+ case GF_PRINT:
+ im.prt_c++;
case GF_GPRINT:
if(sscanf(
&argv[i][argstart],
@@ -2526,7 +2776,7 @@
return -1;
}
break;
- case GF_COMMENT:
+ case GF_COMMENT:
if(strlen(&argv[i][argstart])>FMT_LEG_LEN) argv[i][argstart+FMT_LEG_LEN-3]='\0' ;
strcpy(im.gdes[im.gdes_c-1].legend, &argv[i][argstart]);
break;
@@ -2541,7 +2791,7 @@
im.gdes[im.gdes_c-1].col.green = col_green;
im.gdes[im.gdes_c-1].col.blue = col_blue;
if(strstart <= 0){
- im.gdes[im.gdes_c-1].legend[0] = '\0';
+ im.gdes[im.gdes_c-1].legend[0] = '\0';
} else {
scan_for_col(&argv[i][argstart+strstart],FMT_LEG_LEN,im.gdes[im.gdes_c-1].legend);
}
@@ -2564,7 +2814,7 @@
im.gdes[im.gdes_c-1].col.green = col_green;
im.gdes[im.gdes_c-1].col.blue = col_blue;
if(strstart <= 0){
- im.gdes[im.gdes_c-1].legend[0] = '\0';
+ im.gdes[im.gdes_c-1].legend[0] = '\0';
} else {
scan_for_col(&argv[i][argstart+strstart],FMT_LEG_LEN,im.gdes[im.gdes_c-1].legend);
}
@@ -2591,13 +2841,13 @@
varname,
&col_red,
&col_green,
- &col_blue,
+ &col_blue,
&strstart))>=1){
im.gdes[im.gdes_c-1].col.red = col_red;
im.gdes[im.gdes_c-1].col.green = col_green;
im.gdes[im.gdes_c-1].col.blue = col_blue;
if(strstart <= 0){
- im.gdes[im.gdes_c-1].legend[0] = '\0';
+ im.gdes[im.gdes_c-1].legend[0] = '\0';
} else {
scan_for_col(&argv[i][argstart+strstart],FMT_LEG_LEN,im.gdes[im.gdes_c-1].legend);
}
@@ -2616,12 +2866,17 @@
}
break;
case GF_CDEF:
+ if((rpnex = malloc(strlen(&argv[i][argstart])*sizeof(char)))==NULL){
+ rrd_set_error("malloc for CDEF");
+ return -1;
+ }
if(sscanf(
- &argv[i][argstart],
- "%29[_A-Za-z0-9]=%254[^: ]",
- im.gdes[im.gdes_c-1].vname,
- rpnex) != 2){
+ &argv[i][argstart],
+ "%29[_A-Za-z0-9]=%[^: ]",
+ im.gdes[im.gdes_c-1].vname,
+ rpnex) != 2){
im_free(&im);
+ free(rpnex);
rrd_set_error("can't parse CDEF '%s'",&argv[i][argstart]);
return -1;
}
@@ -2629,7 +2884,7 @@
if(find_var(&im,im.gdes[im.gdes_c-1].vname) != -1){
im_free(&im);
rrd_set_error("duplicate variable '%s'",
- im.gdes[im.gdes_c-1].vname);
+ im.gdes[im.gdes_c-1].vname);
return -1;
}
if((im.gdes[im.gdes_c-1].rpnp = str2rpn(&im,rpnex))== NULL){
@@ -2637,52 +2892,36 @@
im_free(&im);
return -1;
}
-
+ free(rpnex);
break;
case GF_DEF:
-#ifdef WIN32
-/* count the number of ':', if 2 : short filename, if 3 : complete filename */
- {
- char * a; int cnt = 0;
- for ( a = &argv[i][argstart]; *a !=0; a++ ) {
- if ( *a == ':' ) cnt++;
- }
- if (cnt == 3) {
- if(sscanf(
- &argv[i][argstart],
- "%29[_A-Za-z0-9]=%1[A-Za-z]:%252[^:]:" DS_NAM_FMT ":" CF_NAM_FMT,
- im.gdes[im.gdes_c-1].vname,
- im.gdes[im.gdes_c-1].rrd,im.gdes[im.gdes_c-1].rrd+2,
- im.gdes[im.gdes_c-1].ds_nam,
- symname) != 5) {
+ if (sscanf(
+ &argv[i][argstart],
+ "%29[_A-Za-z0-9]=%n",
+ im.gdes[im.gdes_c-1].vname,
+ &strstart)== 1){
+ if(sscanf(&argv[i][argstart
+ +strstart
+ +scan_for_col(&argv[i][argstart+strstart],
+ 254,im.gdes[im.gdes_c-1].rrd)],
+ ":" DS_NAM_FMT ":" CF_NAM_FMT,
+ im.gdes[im.gdes_c-1].ds_nam,
+ symname) != 2){
im_free(&im);
- rrd_set_error("can't parse DEF '%s'",&argv[i][argstart]);
+ rrd_set_error("can't parse DEF '%s' -2",&argv[i][argstart]);
return -1;
}
- *(im.gdes[im.gdes_c-1].rrd+1) = ':';
} else {
-#endif /*WIN32*/
- if(sscanf(
- &argv[i][argstart],
- "%29[_A-Za-z0-9]=%254[^:]:" DS_NAM_FMT ":" CF_NAM_FMT,
- im.gdes[im.gdes_c-1].vname,
- im.gdes[im.gdes_c-1].rrd,
- im.gdes[im.gdes_c-1].ds_nam,
- symname) != 4){
- im_free(&im);
- rrd_set_error("can't parse DEF '%s'",&argv[i][argstart]);
- return -1;
- }
-#ifdef WIN32
+ im_free(&im);
+ rrd_set_error("can't parse DEF '%s'",&argv[i][argstart]);
+ return -1;
}
- }
-#endif /*WIN32*/
-
+
/* checking for duplicate DEF CDEFS */
- if(find_var(&im,im.gdes[im.gdes_c-1].vname) != -1){
+ if (find_var(&im,im.gdes[im.gdes_c-1].vname) != -1){
im_free(&im);
rrd_set_error("duplicate variable '%s'",
- im.gdes[im.gdes_c-1].vname);
+ im.gdes[im.gdes_c-1].vname);
return -1;
}
if((im.gdes[im.gdes_c-1].cf=cf_conv(symname))==-1){
@@ -2694,26 +2933,45 @@
}
}
-
-
+
if (im.gdes_c==0){
rrd_set_error("can't make a graph without contents");
im_free(&im);
return(-1);
}
-
-
- /* parse rest of arguments containing information on what to draw*/
+ /* parse rest of arguments containing information on what to draw*/
if (graph_paint(&im,prdata)==-1){
im_free(&im);
return -1;
}
+
*xsize=im.xgif;
*ysize=im.ygif;
+ if (im.imginfo){
+ char *filename;
+ if (! (*prdata)) {
+ /* maybe prdata is not allocated yet ... lets do it now */
+ if((*prdata = calloc(2,sizeof(char *)))==NULL){
+ rrd_set_error("malloc imginfo");
+ return -1;
+ };
+ }
+ if(((*prdata)[0] = malloc((strlen(im.imginfo)+200+strlen(im.graphfile))*sizeof(char)))
+ ==NULL){
+ rrd_set_error("malloc imginfo");
+ return -1;
+ }
+ filename=im.graphfile+strlen(im.graphfile);
+ while(filename > im.graphfile){
+ if (*(filename-1)=='/' || *(filename-1)=='\\' ) break;
+ filename--;
+ }
+
+ sprintf((*prdata)[0],im.imginfo,filename,im.xgif,im.ygif);
+ }
im_free(&im);
return 0;
}
-
Modified: trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_open.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_open.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_open.c Sat Jul 13 19:22:34 2002
@@ -1,5 +1,5 @@
/*****************************************************************************
- * RRDTOOL 0.99.31 Copyright Tobias Oetiker, 1997, 1998, 1999
+ * RRDtool Copyright Tobias Oetiker, 1997, 1998, 1999
*****************************************************************************
* rrd_open.c Open an RRD File
*****************************************************************************
@@ -8,6 +8,7 @@
*****************************************************************************/
#include "rrd_tool.h"
+#define MEMBLK 8192
/* open a database file, return its header and an open filehandle */
/* positioned to the first cdp in the first rra */
@@ -16,14 +17,24 @@
rrd_open(char *file_name, FILE **in_file, rrd_t *rrd, int rdwr)
{
+
char *mode = NULL;
+ rrd_init(rrd);
if (rdwr == RRD_READONLY) {
+#ifndef WIN32
+ mode = "r";
+#else
mode = "rb";
+#endif
} else {
+#ifndef WIN32
+ mode = "r+";
+#else
mode = "rb+";
+#endif
}
- if ((*in_file = fopen(file_name,mode)) == NULL ){
+ if (((*in_file) = fopen(file_name,mode)) == NULL ){
rrd_set_error("rrdopen can't open '%s'",file_name);
return (-1);
}
@@ -75,6 +86,7 @@
rrd->rra_ptr = NULL;
rrd->pdp_prep = NULL;
rrd->cdp_prep = NULL;
+ rrd->rrd_value = NULL;
}
void rrd_free(rrd_t *rrd)
@@ -86,4 +98,40 @@
free(rrd->rra_ptr);
free(rrd->pdp_prep);
free(rrd->cdp_prep);
+ free(rrd->rrd_value);
+}
+
+int readfile(char *file_name, char **buffer, int skipfirst){
+ long writecnt=0,totalcnt = MEMBLK;
+ FILE *input=NULL;
+ char c ;
+ if ((strcmp("-",file_name) == 0)) { input = stdin; }
+ else {
+ if ((input = fopen(file_name,"rb")) == NULL ){
+ rrd_set_error("readfile can't open '%s'",file_name);
+ return (-1);
+ }
+ }
+ if (skipfirst){
+ do { c = getc(input); } while (c != '\n' && ! feof(input));
+ }
+ if (((*buffer) = (char *) malloc((MEMBLK+4)*sizeof(char))) == NULL) {
+ perror("Allocate Buffer:");
+ exit(1);
+ };
+ do{
+ writecnt += fread((*buffer)+writecnt, 1, MEMBLK * sizeof(char) ,input);
+ if (writecnt >= totalcnt){
+ totalcnt += MEMBLK;
+ if (((*buffer)=rrd_realloc((*buffer), (totalcnt+4) * sizeof(char)))==NULL){
+ perror("Realloc Buffer:");
+ exit(1);
+ };
+ }
+ } while (! feof(input));
+ (*buffer)[writecnt] = '\0';
+ fclose(input);
+ return writecnt;
}
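readfile() above slurps an entire file (or stdin when the name is "-") into one malloc'd buffer, growing it MEMBLK bytes at a time and optionally skipping the first line. The sketch below reproduces just the growth loop in a self-contained program; the slurp name and the stdin-only demo are illustrative, not part of the patch.

/* Minimal standalone sketch of the readfile() growth loop above: extend the
 * buffer in fixed blocks until EOF (illustration only). */
#include <stdio.h>
#include <stdlib.h>

#define MEMBLK 8192

static long slurp(FILE *input, char **buffer)
{
    long used = 0, total = MEMBLK;
    if ((*buffer = malloc(total + 1)) == NULL)
        return -1;
    while (!feof(input) && !ferror(input)) {
        used += fread(*buffer + used, 1, MEMBLK, input);
        if (used >= total) {               /* no room for the next block */
            char *tmp;
            total += MEMBLK;
            if ((tmp = realloc(*buffer, total + 1)) == NULL) {
                free(*buffer);
                return -1;
            }
            *buffer = tmp;
        }
    }
    (*buffer)[used] = '\0';
    return used;
}

int main(void)
{
    char *buf;
    long n = slurp(stdin, &buf);           /* e.g. ./slurp < some.rrdcgi */
    if (n >= 0) {
        printf("read %ld bytes\n", n);
        free(buf);
    }
    return 0;
}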
+
+
Added: trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_cgi.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_cgi.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_cgi.c Sat Jul 13 19:22:34 2002
@@ -0,0 +1,484 @@
+/*****************************************************************************
+ * RRDtool Copyright Tobias Oetiker, 1997, 1998, 1999
+ *****************************************************************************
+ * rrd_cgi.c RRD Web Page Generator
+ *****************************************************************************/
+
+#include "rrd_tool.h"
+#include <cgi.h>
+#include <time.h>
+
+
+#define MEMBLK 1024
+
+/* global variable for libcgi */
+s_cgi **cgiArg;
+
+/* in arg[0] find tags beginning with arg[1], call arg[2] on them,
+ and replace them with the result of the arg[2] call */
+int parse(char **, long, char *, char *(*)(long , char **));
+
+/**************************************************/
+/* tag replacers ... they are called from parse */
+/* through function pointers */
+/**************************************************/
+
+/* return cgi var named arg[0] */
+char* cgiget(long , char **);
+
+/* return a quoted cgi var named arg[0] */
+char* cgigetq(long , char **);
+
+/* return a quoted and sanitized cgi variable */
+char* cgigetqp(long , char **);
+
+/* call rrd_graph and insert appropriate image tag */
+char* drawgraph(long, char **);
+
+/* return PRINT functions from last rrd_graph call */
+char* drawprint(long, char **);
+
+/* set an environment variable */
+char* rrdsetenv(long, char **);
+
+/* include the named file at this point */
+char* includefile(long, char **);
+
+/** http protocol needs special format, and GMT time **/
+char *http_time(time_t *);
+
+
+static char **calcpr=NULL;
+static void calfree (void){
+ if (calcpr) {
+ long i;
+ for(i=0;calcpr[i];i++){
+ free(calcpr[i]);
+ }
+ free(calcpr);
+ }
+}
+
+/* create freeable version of the string */
+char * stralloc(char *str){
+ char *nstr = malloc((strlen(str)+1)*sizeof(char));
+ strcpy(nstr,str);
+ return(nstr);
+}
+int main(int argc, char *argv[]) {
+ long length;
+ char *buffer;
+ char *server_url = NULL;
+ long i;
+ long goodfor=0;
+ long filter=0;
+ long refresh=0;
+ while (1){
+ static struct option long_options[] =
+ {
+ {"goodfor", required_argument, 0, 'g'},
+ {"filter", no_argument, 0, 'f'},
+ {"refresh", no_argument, 0, 'r'},
+ {0,0,0,0}
+ };
+ int option_index = 0;
+ int opt;
+ opt = getopt_long(argc, argv, "g:fr",
+ long_options, &option_index);
+ if (opt == EOF)
+ break;
+ switch(opt) {
+ case 'g':
+ goodfor=atol(optarg);
+ break;
+ case 'f':
+ filter=1;
+ break;
+ case 'r':
+ refresh=1;
+ break;
+ case '?':
+ printf("unknown commandline option '%s'\n",argv[optind-1]);
+ return -1;
+ }
+ }
+
+ if(filter==0) {
+ cgiDebug(0,0);
+ cgiArg = cgiInit ();
+ server_url = getenv("SERVER_URL");
+ }
+ if (optind != argc-1) {
+ fprintf (stderr, "Command line error. Expected Input file name! %d\n",optind);
+ exit(1);
+ }
+
+ length = readfile(argv[optind], &buffer, 1);
+
+ if(rrd_test_error()){
+ fprintf(stderr, "ERROR: %s\n",rrd_get_error());
+ exit(1);
+ }
+
+ if(filter==0) {
+ /* pass 1 makes only sense in cgi mode */
+ for (i=0;buffer[i] != '\0'; i++){
+ i +=parse(&buffer,i,"<RRD::CV ",cgiget);
+ i +=parse(&buffer,i,"<RRD::CV::QUOTE ",cgigetq);
+ i +=parse(&buffer,i,"<RRD::CV::PATH ",cgigetqp);
+ }
+ }
+
+ /* pass 2 */
+ for (i=0;buffer[i] != '\0'; i++){
+ i += parse(&buffer,i,"<RRD::INCLUDE ",includefile);
+ }
+
+ /* pass 3 */
+ for (i=0;buffer[i] != '\0'; i++){
+ i += parse(&buffer,i,"<RRD::SETENV ",rrdsetenv);
+ i += parse(&buffer,i,"<RRD::GRAPH ",drawgraph);
+ i += parse(&buffer,i,"<RRD::PRINT ",drawprint);
+ }
+
+ if (filter==0){
+ printf ("Content-Type: text/html\n"
+ "Content-Length: %d\n", strlen(buffer));
+ if (goodfor > 0){
+ time_t now;
+ now = time(NULL);
+ printf ("Last-Modified: %s\n",http_time(&now));
+ now += goodfor;
+ printf ("Expires: %s\n",http_time(&now));
+ if (refresh) {
+ printf("Refresh: %ld\n", goodfor);
+ }
+ }
+ printf ("\n");
+ }
+ printf ("%s", buffer);
+ calfree();
+ free(buffer);
+ exit(0);
+}
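main() above expands the RRD::* tags in three passes over the template buffer, each pass handing the matching tags to a replacer callback (cgiget, includefile, drawgraph, ...). The splice step such an expander needs, cutting the tag text out of the buffer and dropping the callback's result in its place, is sketched below; parse() itself is not part of this hunk, so this is an assumed illustration of its mechanics rather than its code.

/* Hedged sketch of the splice step a tag expander needs: replace
 * buf[start .. start+taglen) with `repl` in a malloc'd buffer.
 * Illustration only; parse() is not shown in this hunk. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *splice(char *buf, size_t start, size_t taglen, const char *repl)
{
    size_t oldlen = strlen(buf), rlen = strlen(repl);
    char *out = malloc(oldlen - taglen + rlen + 1);
    if (out == NULL) return NULL;
    memcpy(out, buf, start);                          /* text before the tag */
    memcpy(out + start, repl, rlen);                  /* replacement text    */
    strcpy(out + start + rlen, buf + start + taglen); /* text after the tag  */
    free(buf);
    return out;
}

int main(void)
{
    char *page = strdup("<P><RRD::CV name></P>");
    page = splice(page, 3, 14, "daily");              /* tag spans 14 chars */
    printf("%s\n", page);                             /* <P>daily</P>       */
    free(page);
    return 0;
}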
+
+/* remove occurrences of '..'; this is a general measure to make sure
+ paths which came in via cgi do not go UP ... */
+
+char* rrdsetenv(long argc, char **args){
+ if (argc >= 2) {
+ char *xyz=malloc((strlen(args[0])+strlen(args[1])+3)*sizeof(char));
+ if (xyz == NULL){
+ return stralloc("[ERROR: allocating setenv buffer]");
+ };
+ sprintf(xyz,"%s=%s",args[0],args[1]);
+ if( putenv(xyz) == -1) {
+ return stralloc("[ERROR: failed to do putenv]");
+ };
+ } else {
+ return stralloc("[ERROR: setenv failed because not enough arguments were defined]");
+ }
+ return stralloc("");
+}
+
+char* includefile(long argc, char **args){
+ char *buffer;
+ if (argc >= 1) {
+ readfile(args[0], &buffer, 0);
+ if (rrd_test_error()) {
+ char *err = malloc((strlen(rrd_get_error())+20)*sizeof(char));
+ sprintf(err, "[ERROR: %s]",rrd_get_error());
+ rrd_clear_error();
+ return err;
+ } else {
+ return buffer;
+ }
+ }
+ else
+ {
+ return stralloc("[ERROR: No Include file defined]");
+ }
+}
+
+char* rrdstrip(char *buf){
+ char *start;
+ if (buf == NULL) return NULL;
+ while ((start = strstr(buf,"<"))){
+ *start = '_';
+ }
+ while ((start = strstr(buf,">"))){
+ *start = '_';
+ }
+ return stralloc(buf);
+}
+
+char* cgigetq(long argc, char **args){
+ if (argc>= 1){
+ char *buf = rrdstrip(cgiGetValue(cgiArg,args[0]));
+ char *buf2;
+ char *c,*d;
+ int qc=0;
+ if (buf==NULL) return NULL;
+
+ for(c=buf;*c != '\0';c++)
+ if (*c == '"') qc++;
+ if((buf2=malloc((strlen(buf) + qc*4 +2) * sizeof(char)))==NULL){
+ perror("Malloc Buffer");
+ exit(1);
+ };
+ c=buf;
+ d=buf2;
+ *(d++) = '"';
+ while(*c != '\0'){
+ if (*c == '"') {
+ *(d++) = '"';
+ *(d++) = '\'';
+ *(d++) = '"';
+ *(d++) = '\'';
+ }
+ *(d++) = *(c++);
+ }
+ *(d++) = '"';
+ *(d) = '\0';
+ free(buf);
+ return buf2;
+ }
+
+ return stralloc("[ERROR: not enough argument for RRD::CV::QUOTE]");
+}
+
+/* remove occurrences of .. ; this is a general measure to make sure
+ paths which came in via cgi do not go UP ... */
+
+char* cgigetqp(long argc, char **args){
+ if (argc>= 1){
+ char *buf = rrdstrip(cgiGetValue(cgiArg,args[0]));
+ char *buf2;
+ char *c,*d;
+ int qc=0;
+ if (buf==NULL) return NULL;
+
+ for(c=buf;*c != '\0';c++)
+ if (*c == '"') qc++;
+ if((buf2=malloc((strlen(buf) + qc*4 +2) * sizeof(char)))==NULL){
+ perror("Malloc Buffer");
+ exit(1);
+ };
+ c=buf;
+ d=buf2;
+ *(d++) = '"';
+ while(*c != '\0'){
+ if (*c == '"') {
+ *(d++) = '"';
+ *(d++) = '\'';
+ *(d++) = '"';
+ *(d++) = '\'';
+ }
+ if(*c == '/') {
+ *(d++) = '_';c++;
+ } else {
+ if (*c=='.' && *(c+1) == '.'){
+ c += 2;
+ *(d++) = '_'; *(d++) ='_';
+ } else {
+
+ *(d++) = *(c++);
+ }
+ }
+ }
+ *(d++) = '"';
+ *(d) = '\0';
+ free(buf);
+ return buf2;
+ }
+
+ return stralloc("[ERROR: not enough arguments for RRD::CV::PATH]");
+
+}
+
+
+char* cgiget(long argc, char **args){
+ if (argc>= 1)
+ return rrdstrip(cgiGetValue(cgiArg,args[0]));
+ else
+ return stralloc("[ERROR: not enough arguments for RRD::CV]");
+}
+
+
+
+char* drawgraph(long argc, char **args){
+ int i,xsize, ysize;
+ for(i=0;i<argc;i++)
+ if(strcmp(args[i],"--imginfo")==0 || strcmp(args[i],"-g")==0) break;
+ if(i==argc) {
+ args[argc++] = "--imginfo";
+ args[argc++] = "<IMG SRC=\"./%s\" WIDTH=\"%lu\" HEIGHT=\"%lu\">";
+ }
+ optind=0; /* reset gnu getopt */
+ opterr=0; /* reset gnu getopt */
+ calfree();
+ if( rrd_graph(argc+1, args-1, &calcpr, &xsize, &ysize) != -1 ) {
+ return stralloc(calcpr[0]);
+ } else {
+ if (rrd_test_error()) {
+ char *err = malloc((strlen(rrd_get_error())+20)*sizeof(char));
+ sprintf(err, "[ERROR: %s]",rrd_get_error());
+ rrd_clear_error();
+ calfree();
+ return err;
+ }
+ }
+ return NULL;
+}
+
+char* drawprint(long argc, char **args){
+ if (argc>=1 && calcpr != NULL){
+ long i=0;
+ while (calcpr[i] != NULL) i++; /* determine number of lines in calcpr */
+ if (atol(args[0])<i-1)
+ return calcpr[atol(args[0])+1];
+ }
+ return stralloc("[ERROR: RRD::PRINT argument error]");
+}
+
+/* scan aLine until an unescaped '>' arrives */
+char* scanargs(char *aLine, long *argc, char ***args)
+{
+ char *getP, *putP;
+ char Quote = 0;
+ int argal = MEMBLK;
+ int braket = 0;
+ int inArg = 0;
+ if (((*args) = (char **) malloc(MEMBLK*sizeof(char *))) == NULL) {
+ return NULL;
+ }
+ /* skip leading blanks */
+ while (*aLine && *aLine <= ' ') aLine++;
+
+ *argc = 0;
+ getP = aLine;
+ putP = aLine;
+ while (*getP && !( !Quote && (braket == 0) && ((*getP) == '>'))){
+ if (*getP < ' ') *getP = ' '; /*remove all special chars*/
+ switch (*getP) {
+ case ' ':
+ if (Quote){
+ *(putP++)=*getP;
+ } else
+ if(inArg) {
+ *(putP++) = 0;
+ inArg = 0;
+ }
+ break;
+ case '"':
+ case '\'':
+ if (Quote != 0) {
+ if (Quote == *getP)
+ Quote = 0;
+ else {
+ *(putP++)=*getP;
+ }
+ } else {
+ if(!inArg){
+ (*args)[++(*argc)] = putP;
+ inArg=1;
+ }
+ Quote = *getP;
+ }
+ break;
+ default:
+ if (Quote == 0 && (*getP) == '<') {
+ braket++;
+ }
+ if (Quote == 0 && (*getP) == '>') {
+ braket--;
+ }
+
+ if(!inArg){
+ (*args)[++(*argc)] = putP;
+ inArg=1;
+ }
+ *(putP++)=*getP;
+ break;
+ }
+ if ((*argc) >= argal-10 ) {
+ argal += MEMBLK;
+ if (((*args)=rrd_realloc((*args),(argal)*sizeof(char *))) == NULL) {
+ return NULL;
+ }
+ }
+ getP++;
+ }
+
+ *putP = '\0';
+ (*argc)++;
+ if (Quote)
+ return NULL;
+ else
+ return getP+1; /* pointer to next char after parameter */
+}
+
+
+
+int parse(char **buf, long i, char *tag,
+ char *(*func)(long argc, char **args)){
+
+ /* the name of the variable ... */
+ char *val;
+ long valln;
+ char **args;
+ char *end;
+ long end_offset;
+ long argc;
+ /* do we like it ? */
+ if (strncmp((*buf)+i, tag, strlen(tag))!=0) return 0;
+ /* scanargs puts \0 into *buf ... so after scanargs it is probably
+ not a good time to use strlen on buf */
+ end = scanargs((*buf)+i+strlen(tag),&argc,&args);
+ if (! end) {
+ for (;argc>2;argc--){
+ *((args[argc-1])-1)=' ';
+ }
+ val = stralloc("[ERROR: Parsing Problem with the following text\n"
+ " Check original file. This may have been altered by parsing.]\n\n");
+ end = (*buf)+i+1;
+ } else {
+ val = func(argc-1,args+1);
+ free (args);
+ }
+ /* for (ii=0;ii<argc;ii++) printf("'%s'\n", args[ii]); */
+ if (val != NULL) {
+ valln = strlen(val);
+ } else { valln = 0;}
+
+ /* make enough room for replacement */
+ end_offset = end - (*buf);
+ if(end-(*buf) < i + valln){ /* make sure we do not shrink the mallocd block */
+ /* calculating the new length of the buffer is simple: add the current
+ buffer pos (i), the length of the string after the replaced tag,
+ the length of the replacement string, and 1 for the final zero ... */
+ if(((*buf) = rrd_realloc((*buf),
+ (i+strlen(end) + valln +1) * sizeof(char)))==NULL){
+ perror("Realloc buf:");
+ exit(1);
+ };
+ }
+ end = (*buf) + end_offset; /* make sure the 'end' pointer gets moved
+ along with the buf pointer when realloc
+ moves memory ... */
+ /* splice the variable */
+ memmove ((*buf)+i+valln,end,strlen(end)+1);
+ if (val != NULL ) memmove ((*buf)+i,val, valln);
+ free(val);
+ return valln;
+}
+
+char *
+http_time(time_t *now) {
+ struct tm *tmptime;
+ static char buf[60];
+
+ tmptime=gmtime(now);
+ strftime(buf,sizeof(buf),"%a, %d %b %Y %H:%M:%S GMT",tmptime);
+ return(buf);
+}
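
The tag expansion in parse() above boils down to one splice: grow the buffer when the replacement is longer than the tag, memmove the tail of the document into place, then copy the replacement value into the gap. The standalone sketch below is not part of this commit; the buffer contents and the replace_span() helper are invented purely to illustrate the same pattern with plain realloc/memmove/memcpy.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Replace buf[i .. i+taglen-1] with val, growing the buffer if needed.
     * Returns the (possibly moved) buffer, or NULL if realloc fails. */
    static char *replace_span(char *buf, size_t i, size_t taglen, const char *val)
    {
        size_t buflen  = strlen(buf);
        size_t vallen  = strlen(val);
        size_t taillen = buflen - (i + taglen);      /* text after the tag */

        if (vallen > taglen) {                       /* need more room */
            char *tmp = realloc(buf, buflen - taglen + vallen + 1);
            if (tmp == NULL)
                return NULL;
            buf = tmp;
        }
        /* shift the tail into place, then drop the value into the gap */
        memmove(buf + i + vallen, buf + i + taglen, taillen + 1);
        memcpy(buf + i, val, vallen);
        return buf;
    }

    int main(void)
    {
        char *buf = malloc(64);
        strcpy(buf, "before <TAG> after");
        buf = replace_span(buf, 7, 5, "EXPANDED VALUE");
        puts(buf);                       /* before EXPANDED VALUE after */
        free(buf);
        return 0;
    }
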
Modified: trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_create.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_create.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_create.c Sat Jul 13 19:22:34 2002
@@ -1,5 +1,5 @@
/*****************************************************************************
- * RRDTOOL 0.99.31 Copyright Tobias Oetiker, 1997, 1998, 1999
+ * RRDtool Copyright Tobias Oetiker, 1997, 1998, 1999
*****************************************************************************
* rrd_create.c creates new rrds
*****************************************************************************/
@@ -13,12 +13,11 @@
rrd_t rrd;
long i,long_tmp;
time_t last_up;
-#ifdef WANT_AT_STYLE_TIMESPEC
struct time_value last_up_tv;
char *parsetime_error = NULL;
- int last_up_is_ok = 0;
-#endif
+ /* init last_up */
+ last_up = time(NULL)-10;
/* init rrd clean */
rrd_init(&rrd);
/* static header */
@@ -44,7 +43,7 @@
/* a default value */
rrd.ds_def = NULL;
rrd.rra_def = NULL;
- last_up = time(NULL);
+
while (1){
static struct option long_options[] =
{
@@ -62,17 +61,6 @@
switch(opt) {
case 'b':
-#ifdef WANT_AT_STYLE_TIMESPEC
- {
- char *endp;
- last_up_is_ok = 0;
- last_up = strtol(optarg, &endp, 0);
- if (*endp == '\0') /* it was a valid number */
- if (last_up > 31122038 || /* 31 Dec 2038 in DDMMYYYY */
- last_up < 0) {
- last_up_is_ok = 1;
- break;
- }
if ((parsetime_error = parsetime(optarg, &last_up_tv))) {
rrd_set_error("last update time: %s", parsetime_error );
rrd_free(&rrd);
@@ -85,15 +73,9 @@
rrd_free(&rrd);
return(-1);
}
- if (!last_up_is_ok)
- last_up = mktime(&last_up_tv.tm) + last_up_tv.offset;
- }/* this is for the entire block */
-
-#else
- last_up = atol(optarg);
-#endif
- if (last_up < 0) /* if time is negative this means go back from now. */
- last_up = time(NULL)+last_up;
+
+ last_up = mktime(&last_up_tv.tm) + last_up_tv.offset;
+
if (last_up < 3600*24*365*10){
rrd_set_error("the first entry to the RRD should be after 1980");
rrd_free(&rrd);
@@ -123,7 +105,7 @@
char minstr[20], maxstr[20];
if (strncmp(argv[i],"DS:",3)==0){
size_t old_size = sizeof(ds_def_t)*(rrd.stat_head->ds_cnt);
- if((rrd.ds_def = realloc(rrd.ds_def,
+ if((rrd.ds_def = rrd_realloc(rrd.ds_def,
old_size+sizeof(ds_def_t)))==NULL){
rrd_set_error("allocating rrd.ds_def");
rrd_free(&rrd);
@@ -166,7 +148,7 @@
}
} else if (strncmp(argv[i],"RRA:",3)==0){
size_t old_size = sizeof(rra_def_t)*(rrd.stat_head->rra_cnt);
- if((rrd.rra_def = realloc(rrd.rra_def,
+ if((rrd.rra_def = rrd_realloc(rrd.rra_def,
old_size+sizeof(rra_def_t)))==NULL){
rrd_set_error("allocating rrd.rra_def");
rrd_free(&rrd);
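
A note on the last_up < 3600*24*365*10 sanity check that survives the rewrite above: the constant is ten 365-day years of seconds, which lands in late December 1979 in Unix time (leap days ignored), hence the "after 1980" error message. A throwaway check, only to show where the constant falls:

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        time_t threshold = 3600L * 24 * 365 * 10;   /* value used in rrd_create.c */
        char buf[64];

        /* print the calendar date the threshold corresponds to */
        strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S UTC", gmtime(&threshold));
        printf("%ld -> %s\n", (long)threshold, buf);   /* roughly 1979-12-29 */
        return 0;
    }
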
Modified: trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_last.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_last.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_last.c Sat Jul 13 19:22:34 2002
@@ -1,5 +1,5 @@
/*****************************************************************************
- * RRDTOOL 0.99.31 Copyright Tobias Oetiker, 1997, 1998, 1999
+ * RRDtool Copyright Tobias Oetiker, 1997, 1998, 1999
*****************************************************************************
* rrd_last.c
*****************************************************************************
Modified: trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_dump.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_dump.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_dump.c Sat Jul 13 19:22:35 2002
@@ -1,5 +1,5 @@
/*****************************************************************************
- * RRDTOOL 0.99.31 Copyright Tobias Oetiker, 1997, 1998, 1999
+ * RRDtool Copyright Tobias Oetiker, 1997, 1998, 1999
*****************************************************************************
* rrd_dump Display a RRD
*****************************************************************************
@@ -9,146 +9,139 @@
#include "rrd_tool.h"
+extern char *tzname[2];
+
int
rrd_dump(int argc, char **argv)
{
- int i,ii,iii,full=0;
+ int i,ii,ix,iii=0;
time_t now;
char somestring[255];
rrd_value_t my_cdp;
-
+ long rra_base, rra_start, rra_next;
FILE *in_file;
rrd_t rrd;
- while (1){
- static struct option long_options[] =
- {
- {"full", no_argument, 0, 'f'},
- {0,0,0,0}
- };
- int option_index = 0;
- int opt;
- opt = getopt_long(argc, argv, "f",
- long_options, &option_index);
- if (opt == EOF)
- break;
-
- switch(opt) {
- case 'f':
- full=1;
- break;
- case '?':
- rrd_set_error("unknown option '%s'",argv[optind-1]);
- return(-1);
- }
- }
-
- if(optind >= argc){
- rrd_set_error("please specify an rrd");
- return(-1);
- }
- if(rrd_open(argv[optind],&in_file,&rrd, RRD_READONLY)==-1){
+ if(rrd_open(argv[1],&in_file,&rrd, RRD_READONLY)==-1){
return(-1);
}
- puts("RRD Header");
- puts("---------------------------");
- puts("");
- puts("* stat_head");
- printf("\tcookie: '%s'\n",rrd.stat_head->cookie);
- printf("\tversion: '%s'\n",rrd.stat_head->version);
- printf("\tfloat_cookie: %e\n",rrd.stat_head->float_cookie);
-
- printf("\tds_cnt: %lu\n",rrd.stat_head->ds_cnt);
- printf("\trra_cnt: %lu\n",rrd.stat_head->rra_cnt);
- printf("\tpdp_step: %lu seconds\n",rrd.stat_head->pdp_step);
-
- for(i=0;i<rrd.stat_head->ds_cnt;i++){
- printf("\n* ds_def[%i]\n",i);
- printf("\tds-nam: %s\n",rrd.ds_def[i].ds_nam);
- printf("\tdst: %s\n",rrd.ds_def[i].dst);
- printf("\tds_mrhb: %lu\n",rrd.ds_def[i].par[DS_mrhb_cnt].u_cnt);
- printf("\tmax_val: %e\n",rrd.ds_def[i].par[DS_max_val].u_val);
- printf("\tmin_val: %e\n",rrd.ds_def[i].par[DS_min_val].u_val);
- }
-
- for(i=0;i<rrd.stat_head->rra_cnt;i++){
- printf("\n* rra_def[%i]\n",i);
- printf("\tcf_name: %s\n",rrd.rra_def[i].cf_nam);
- printf("\trow_cnt: %lu\n",rrd.rra_def[i].row_cnt);
- printf("\tpdp_cnt: %lu\n",rrd.rra_def[i].pdp_cnt);
- }
-
- printf("\n* live_head\n");
+ puts("<!-- Round Robin Database Dump -->");
+ puts("<rrd>");
+ printf("\t<version> %s </version>\n",rrd.stat_head->version);
+ printf("\t<step> %lu </step> <!-- Seconds -->\n",rrd.stat_head->pdp_step);
#if HAVE_STRFTIME
- strftime(somestring,200,"%Y-%m-%d %H:%M:%S",
+ strftime(somestring,200,"%Y-%m-%d %H:%M:%S %Z",
localtime(&rrd.live_head->last_up));
#else
# error "Need strftime"
#endif
- printf("\tlast_up: '%lu' %s\n",
+ printf("\t<lastupdate> %ld </lastupdate> <!-- %s -->\n\n",
rrd.live_head->last_up,somestring);
-
- printf("\n* pdp_prep\n");
for(i=0;i<rrd.stat_head->ds_cnt;i++){
- printf("\n (ds='%s')\n",rrd.ds_def[i].ds_nam);
-
-
- printf("\tlast_ds: '%s'\n",rrd.pdp_prep[i].last_ds);
- printf("\tvalue: %e\n",rrd.pdp_prep[i].scratch[PDP_val].u_val);
- printf("\tunkn_sec: %lu seconds\n",
+ printf("\t<ds>\n");
+ printf("\t\t<name> %s </name>\n",rrd.ds_def[i].ds_nam);
+ printf("\t\t<type> %s </type>\n",rrd.ds_def[i].dst);
+ printf("\t\t<minimal_heartbeat> %lu </minimal_heartbeat>\n",rrd.ds_def[i].par[DS_mrhb_cnt].u_cnt);
+ if (isnan(rrd.ds_def[i].par[DS_min_val].u_val)){
+ printf("\t\t<min> NaN </min>\n");
+ } else {
+ printf("\t\t<min> %0.10e </min>\n",rrd.ds_def[i].par[DS_min_val].u_val);
+ }
+ if (isnan(rrd.ds_def[i].par[DS_max_val].u_val)){
+ printf("\t\t<max> NaN </max>\n");
+ } else {
+ printf("\t\t<max> %0.10e </max>\n",rrd.ds_def[i].par[DS_max_val].u_val);
+ }
+ printf("\n\t\t<!-- PDP Status -->\n");
+ printf("\t\t<last_ds> %s </last_ds>\n",rrd.pdp_prep[i].last_ds);
+ if (isnan(rrd.pdp_prep[i].scratch[PDP_val].u_val)){
+ printf("\t\t<value> NaN </value>\n");
+ } else {
+ printf("\t\t<value> %0.10e </value>\n",rrd.pdp_prep[i].scratch[PDP_val].u_val);
+ }
+ printf("\t\t<unknown_sec> %lu </unknown_sec>\n",
rrd.pdp_prep[i].scratch[PDP_unkn_sec_cnt].u_cnt);
+
+ printf("\t</ds>\n\n");
}
-
- printf("\n* cdp_prep");
- for(i=0;i<rrd.stat_head->rra_cnt;i++){
- printf("\n (rra=%i)\n",i);
- for(ii=0;ii<rrd.stat_head->ds_cnt;ii++){
- printf("\n (ds=%s)\n",rrd.ds_def[ii].ds_nam);
- printf("\tvalue: %e\n",
- rrd.cdp_prep[i*rrd.stat_head->ds_cnt+ii].scratch[CDP_val].u_val);
- printf("\tunkn_pdp: %lu pdp\n",
- rrd.cdp_prep[i*rrd.stat_head->ds_cnt+ii].scratch[CDP_unkn_pdp_cnt].u_cnt);
- }
- }
- for(i=0;i<rrd.stat_head->rra_cnt;i++){
- printf("\n* rra_ptr[%i]\n",i);
- printf("\tcur_row: %lu\n",rrd.rra_ptr[i].cur_row);
- }
+ puts("<!-- Round Robin Archives -->");
+
+ rra_base=ftell(in_file);
+ rra_next = rra_base;
- if (full) {
- puts("");
- puts("RRD Contents");
- puts("-----------------------");
- puts("");
- for(i=0;i<rrd.stat_head->rra_cnt;i++){
- printf("[%3i]:\n",i);
+ for(i=0;i<rrd.stat_head->rra_cnt;i++){
+
+ long timer=0;
+ rra_start= rra_next;
+ rra_next += ( rrd.stat_head->ds_cnt
+ * rrd.rra_def[i].row_cnt
+ * sizeof(rrd_value_t));
+ printf("\t<rra>\n");
+ printf("\t\t<cf> %s </cf>\n",rrd.rra_def[i].cf_nam);
+ printf("\t\t<pdp_per_row> %lu </pdp_per_row> <!-- %lu seconds -->\n\n",
+ rrd.rra_def[i].pdp_cnt, rrd.rra_def[i].pdp_cnt
+ *rrd.stat_head->pdp_step);
+ printf("\t\t<cdp_prep>\n");
+ for(ii=0;ii<rrd.stat_head->ds_cnt;ii++){
+ double value = rrd.cdp_prep[i*rrd.stat_head->ds_cnt+ii].scratch[CDP_val].u_val;
+ printf("\t\t\t<ds>");
+ if (isnan(value)){
+ printf("<value> NaN </value>");
+ } else {
+ printf("<value> %0.10e </value>", value);
+ }
+ printf(" <unknown_datapoints> %lu </unknown_datapoints>",
+ rrd.cdp_prep[i*rrd.stat_head->ds_cnt+ii].scratch[CDP_unkn_pdp_cnt].u_cnt);
+ printf("</ds>\n");
+ }
+ printf("\t\t</cdp_prep>\n");
+
+ printf("\t\t<database>\n");
+ fseek(in_file,(rra_start
+ +(rrd.rra_ptr[i].cur_row+1)
+ * rrd.stat_head->ds_cnt
+ * sizeof(rrd_value_t)),SEEK_SET);
+ timer = - (rrd.rra_def[i].row_cnt-1);
+ ii=rrd.rra_ptr[i].cur_row;
+ for(ix=0;ix<rrd.rra_def[i].row_cnt;ix++){
+ ii++;
+ if (ii>=rrd.rra_def[i].row_cnt) {
+ fseek(in_file,rra_start,SEEK_SET);
+ ii=0; /* wrap if max row cnt is reached */
+ }
now = (rrd.live_head->last_up
- - rrd.live_head->last_up % (rrd.rra_def[i].pdp_cnt*rrd.stat_head->pdp_step)
- - rrd.rra_ptr[i].cur_row * rrd.rra_def[i].pdp_cnt*rrd.stat_head->pdp_step);
- for(ii=0;ii<rrd.rra_def[i].row_cnt;ii++){
- if (rrd.rra_ptr[i].cur_row==ii) {
- printf("-> ");
+ - rrd.live_head->last_up
+ % (rrd.rra_def[i].pdp_cnt*rrd.stat_head->pdp_step))
+ + (timer*rrd.rra_def[i].pdp_cnt*rrd.stat_head->pdp_step);
+
+ timer++;
+#if HAVE_STRFTIME
+ strftime(somestring,200,"%Y-%m-%d %H:%M:%S %Z", localtime(&now));
+#else
+# error "Need strftime"
+#endif
+ printf("\t\t\t<!-- %s --> <row>",somestring);
+ for(iii=0;iii<rrd.stat_head->ds_cnt;iii++){
+ fread(&my_cdp,sizeof(rrd_value_t),1,in_file);
+ if (isnan(my_cdp)){
+ printf("<v> NaN </v>");
} else {
- printf(" ");
- }
- printf("%10lu",now);
- now += rrd.rra_def[i].pdp_cnt*rrd.stat_head->pdp_step;
- if (rrd.rra_ptr[i].cur_row==ii)
- now -= rrd.rra_def[i].pdp_cnt*rrd.stat_head->pdp_step*rrd.rra_def[i].row_cnt;
-
- for(iii=0;iii<rrd.stat_head->ds_cnt;iii++){
- fread(&my_cdp,sizeof(rrd_value_t),1,in_file);
-
- printf(" %12.3f",my_cdp);
- }
- printf("\n");
+ printf("<v> %0.10e </v>",my_cdp);
+ };
}
+ printf("</row>\n");
}
+ printf("\t\t</database>\n\t</rra>\n");
+
}
+ printf("</rrd>\n");
rrd_free(&rrd);
fclose(in_file);
return(0);
}
+
+
+
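
The rewritten dump walks each RRA as a ring buffer: it seeks to the row just after cur_row, wraps back to rra_start when it falls off the end, and derives a timestamp for every row from last_up and the row's distance from the newest entry. The sketch below reproduces only that traversal order on an in-memory array; the array contents, row count and step are made up for illustration and none of this is code from the commit.

    #include <stdio.h>

    int main(void)
    {
        /* toy RRA: 8 rows, the newest value sits at cur_row */
        double rows[8] = {10, 11, 12, 13, 14, 15, 16, 17};
        unsigned long row_cnt = 8, cur_row = 4, ix, ii;
        long last_up = 1000000000, step = 300;    /* seconds covered by one row */
        long timer = -(long)(row_cnt - 1);        /* oldest row gets the earliest time */

        ii = cur_row;
        for (ix = 0; ix < row_cnt; ix++) {
            ii++;
            if (ii >= row_cnt)
                ii = 0;                           /* wrap, like the fseek back to rra_start */
            printf("t=%ld value=%g\n",
                   last_up - last_up % step + timer * step, rows[ii]);
            timer++;
        }
        return 0;
    }
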
Added: trunk/orca/packages/rrdtool-1.0.7.2/src/pngsize.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/pngsize.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/pngsize.c Sat Jul 13 19:22:35 2002
@@ -0,0 +1,42 @@
+/*****************************************************************************
+ * RRDtool Copyright Tobias Oetiker, 1997, 1998, 1999
+ *****************************************************************************
+ * pngsize.c determine the size of a PNG image
+ *****************************************************************************/
+
+#include <png.h>
+
+int
+PngSize(FILE *fd, long *width, long *height)
+{
+ png_structp png_read_ptr =
+ png_create_read_struct(PNG_LIBPNG_VER_STRING,
+ (png_voidp)NULL,
+ /* we would need to point to error handlers
+ here to do it properly */
+ (png_error_ptr)NULL, (png_error_ptr)NULL);
+
+ png_infop info_ptr = png_create_info_struct(png_read_ptr);
+
+ (*width)=0;
+ (*height)=0;
+
+ if (setjmp(png_read_ptr->jmpbuf)){
+ png_destroy_read_struct(&png_read_ptr, &info_ptr, (png_infopp)NULL);
+ return 0;
+ }
+
+ png_init_io(png_read_ptr,fd);
+ png_read_info(png_read_ptr, info_ptr);
+ (*width)=png_get_image_width(png_read_ptr, info_ptr);
+ (*height)=png_get_image_height(png_read_ptr, info_ptr);
+
+ png_destroy_read_struct(&png_read_ptr, &info_ptr, NULL);
+ if (*width >0 && *height >0)
+ return 1;
+ else
+ return 0;
+}
+
+
+
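
For completeness, a small caller for the new PngSize() helper; it only assumes the declaration added to rrd_tool.h in this commit and has to be linked against pngsize.c and libpng. The command-line handling is just an example.

    #include <stdio.h>

    int PngSize(FILE *fd, long *width, long *height);   /* from pngsize.c */

    int main(int argc, char *argv[])
    {
        long w = 0, h = 0;
        FILE *fd;

        if (argc != 2) {
            fprintf(stderr, "usage: %s image.png\n", argv[0]);
            return 1;
        }
        if ((fd = fopen(argv[1], "rb")) == NULL) {
            perror(argv[1]);
            return 1;
        }
        if (PngSize(fd, &w, &h))
            printf("%s: %ldx%ld\n", argv[1], w, h);
        else
            fprintf(stderr, "%s: could not read PNG header\n", argv[1]);
        fclose(fd);
        return 0;
    }
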
Modified: trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_diff.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_diff.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_diff.c Sat Jul 13 19:22:35 2002
@@ -1,5 +1,5 @@
/*****************************************************************************
- * RRDTOOL 0.99.31 Copyright Tobias Oetiker, 1999
+ * RRDtool Copyright Tobias Oetiker, 1999
* This code is stolen from rateup (mrtg-2.x) by Dave Rand
*****************************************************************************
* diff calculate the difference between two very long integers available as
Modified: trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_tune.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_tune.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_tune.c Sat Jul 13 19:22:35 2002
@@ -1,5 +1,5 @@
/*****************************************************************************
- * RRDTOOL 0.99.31 Copyright Tobias Oetiker, 1997, 1998, 1999
+ * RRDtool Copyright Tobias Oetiker, 1997, 1998, 1999
*****************************************************************************
* change header parameters of an rrd
*****************************************************************************
@@ -107,7 +107,8 @@
rrd_free(&rrd);
return -1;
}
- strncpy(rrd.ds_def[ds].dst,dst,DST_SIZE);
+ strncpy(rrd.ds_def[ds].dst,dst,DST_SIZE-1);
+ rrd.ds_def[ds].dst[DST_SIZE-1]='\0';
rrd.pdp_prep[ds].last_ds[0] = 'U';
rrd.pdp_prep[ds].last_ds[1] = 'N';
@@ -127,7 +128,8 @@
rrd_free(&rrd);
return -1;
}
- strncpy(rrd.ds_def[ds].ds_nam,ds_new,DS_NAM_SIZE);
+ strncpy(rrd.ds_def[ds].ds_nam,ds_new,DS_NAM_SIZE-1);
+ rrd.ds_def[ds].ds_nam[DS_NAM_SIZE-1]='\0';
break;
case '?':
rrd_set_error("unknown option '%s'",argv[optind-1]);
Modified: trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_tool.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_tool.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_tool.c Sat Jul 13 19:22:35 2002
@@ -1,5 +1,5 @@
/*****************************************************************************
- * RRDTOOL 0.99.31 Copyright Tobias Oetiker, 1997, 1998, 1999
+ * RRDtool Copyright Tobias Oetiker, 1997, 1998, 1999
*****************************************************************************
* rrd_tool.c Startup wrapper
*****************************************************************************
@@ -21,7 +21,7 @@
void PrintUsage(void)
{
printf("\n"
- "RRD TOOL 0.99.28 Copyright (C) 1999 by Tobias Oetiker <tobi at oetiker.ch>\n\n"
+ "RRDtool Copyright (C) 1999 by Tobias Oetiker <tobi at oetiker.ch>\n\n"
"Usage: rrdtool [options] command command_options\n\n"
"Valid commands and command_options are listed below.\n\n"
@@ -30,14 +30,18 @@
"\t\t[--step|-s step]\n"
"\t\t[DS:ds-name:DST:heartbeat:min:max] [RRA:CF:xff:steps:rows]\n\n"
- "* dump - dump an RRD\n\n"
- "\trrdtool dump filename.rrd [--full|-f]\n\n"
+ "* dump - dump an RRD to XML\n\n"
+ "\trrdtool dump filename.rrd >filename.xml\n\n"
+
+ "* restore - restore an RRD file from its XML form\n\n"
+ "\trrdtool restore [--range-check|-r] filename.xml filename.rrd\n\n"
"* last - show last update time for RRD\n\n"
"\trrdtool last filename.rrd\n\n"
"* update - update an RRD\n\n"
"\trrdtool update filename\n"
+ "\t\t--template|-t ds-name:ds-name:...\n"
"\t\ttime|N:value[:value...]\n\n"
"\t\t[ time:value[:value...] ..]\n\n"
@@ -49,11 +53,15 @@
"* graph - generate a graph from one or several RRD\n\n"
"\trrdtool graph filename [-s|--start seconds] [-e|--end seconds]\n"
"\t\t[-x|--x-grid x-axis grid and label]\n"
+ "\t\t[--alt-y-grid]\n"
"\t\t[-y|--y-grid y-axis grid and label]\n"
"\t\t[-v|--vertical-label string] [-w|--width pixels]\n"
"\t\t[-h|--height pixels] [-o|--logarithmic]\n"
- "\t\t[-u|--upper-limit value]\n"
+ "\t\t[-u|--upper-limit value] [-z|--lazy]\n"
"\t\t[-l|--lower-limit value] [-r|--rigid]\n"
+ "\t\t[--alt-autoscale]\n"
+ "\t\t[-f|--imginfo printfstr]\n"
+ "\t\t[-a|--imgformat GIF|PNG]\n"
"\t\t[-c|--color COLORTAG#rrggbb] [-t|--title string]\n"
"\t\t[DEF:vname=rrd:ds-name:CF]\n"
"\t\t[CDEF:vname=rpn-expression]\n"
@@ -76,7 +84,7 @@
" * resize - alter the lenght of one of the RRAs in an RRD\n\n"
"\trrdtool resize filename rranum GROW|SHRINK rows\n\n"
- "RRD TOOL is distributed under the Terms of the GNU General\n"
+ "RRDtool is distributed under the Terms of the GNU General\n"
"Public License Version 2. (www.gnu.org/copyleft/gpl.html)\n\n"
"For more information read the RRD manpages\n\n");
@@ -87,7 +95,12 @@
{
char **myargv;
char aLine[MAX_LENGTH];
-
+#ifdef MUST_DISABLE_SIGFPE
+ signal(SIGFPE,SIG_IGN);
+#endif
+#ifdef MUST_DISABLE_FPMASK
+ fpsetmask(0);
+#endif
if (argc == 1)
{
PrintUsage();
@@ -154,6 +167,9 @@
if (argc < 3
|| strcmp("help", argv[1]) == 0
+ || strcmp("--help", argv[1]) == 0
+ || strcmp("-help", argv[1]) == 0
+ || strcmp("-?", argv[1]) == 0
|| strcmp("-h", argv[1]) == 0 ) {
PrintUsage();
return 0;
@@ -163,6 +179,14 @@
rrd_create(argc-1, &argv[1]);
else if (strcmp("dump", argv[1]) == 0)
rrd_dump(argc-1, &argv[1]);
+ else if (strcmp("--version", argv[1]) == 0 ||
+ strcmp("version", argv[1]) == 0 ||
+ strcmp("v", argv[1]) == 0 ||
+ strcmp("-v", argv[1]) == 0 ||
+ strcmp("-version", argv[1]) == 0 )
+ printf("RRDtool Copyright (C) 1999 by Tobias Oetiker <tobi at oetiker.ch>\n");
+ else if (strcmp("restore", argv[1]) == 0)
+ rrd_restore(argc-1, &argv[1]);
else if (strcmp("resize", argv[1]) == 0)
rrd_resize(argc-1, &argv[1]);
else if (strcmp("last", argv[1]) == 0)
@@ -196,7 +220,6 @@
char **calcpr;
int xsize, ysize;
int i;
- calcpr = NULL;
if( rrd_graph(argc-1, &argv[1], &calcpr, &xsize, &ysize) != -1 ) {
if (strcmp(argv[2],"-") != 0)
printf ("%dx%d\n",xsize,ysize);
Added: trunk/orca/packages/rrdtool-1.0.7.2/src/Makefile.am
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/Makefile.am (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/Makefile.am Sat Jul 13 19:22:35 2002
@@ -0,0 +1,72 @@
+## Process this file with automake to produce Makefile.in
+
+#AUTOMAKE_OPTIONS = foreign
+#
+#ACLOCAL_M4 = $(top_srcdir)/config/aclocal.m4
+#AUTOHEADER = @AUTOHEADER@ --localdir=$(top_srcdir)/config
+
+CGI_LIB_DIR = $(top_srcdir)/@CGI_LIB_DIR@
+GD_LIB_DIR = $(top_srcdir)/@GD_LIB_DIR@
+PNG_LIB_DIR = $(top_srcdir)/@PNG_LIB_DIR@
+ZLIB_LIB_DIR = $(top_srcdir)/@ZLIB_LIB_DIR@
+
+INCLUDES = -I$(CGI_LIB_DIR) -I$(GD_LIB_DIR) -I$(PNG_LIB_DIR) -I$(ZLIB_LIB_DIR)
+
+#COMPILE = $(CC) $(DEFS) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) $(CFLAGS_EXTRA)
+#LTCOMPILE = $(LIBTOOL) --mode=compile $(CC) $(DEFS) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) $(CFLAGS_EXTRA)
+#LINK = $(LIBTOOL) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(CFLAGS_EXTRA) $(LDFLAGS) -o $@
+
+RRD_C_FILES = \
+ gdpng.c \
+ getopt.c \
+ getopt1.c \
+ gifsize.c \
+ parsetime.c \
+ pngsize.c \
+ rrd_create.c \
+ rrd_diff.c \
+ rrd_dump.c \
+ rrd_error.c \
+ rrd_fetch.c \
+ rrd_format.c \
+ rrd_graph.c \
+ rrd_last.c \
+ rrd_open.c \
+ rrd_resize.c \
+ rrd_restore.c \
+ rrd_tune.c \
+ rrd_update.c \
+ getopt.h ntconfig.h parsetime.h rrd_format.h rrd_tool.h
+
+# Build two libraries. One is a public one that gets installed in
+# $prefix/lib. Libtool does not create an archive of the PIC compiled
+# objects for this library type. The second library is a private one
+# meant to build the RRDs.so for perl-shared. In this case libtool
+# creates a ./.lib/*.al file that contains the PIC compiled object
+# files.
+
+RRD_LIBS = \
+ $(CGI_LIB_DIR)/librrd_cgi.la \
+ $(GD_LIB_DIR)/librrd_gd.la \
+ $(PNG_LIB_DIR)/librrd_png.la \
+ $(ZLIB_LIB_DIR)/librrd_z.la
+
+lib_LTLIBRARIES = librrd.la
+noinst_LTLIBRARIES = librrd_private.la
+
+librrd_la_SOURCES = $(RRD_C_FILES)
+librrd_private_la_SOURCES = $(RRD_C_FILES)
+
+librrd_la_LIBADD = $(RRD_LIBS)
+librrd_private_la_LIBADD = $(RRD_LIBS)
+librrd_la_LDFLAGS = -version-info 0:0:0
+
+bin_PROGRAMS = rrdcgi rrdtool
+
+rrdcgi_SOURCES = rrd_cgi.c
+rrdcgi_LDADD = librrd.la
+
+rrdtool_SOURCES = rrd_tool.c
+rrdtool_LDADD = librrd.la
+
+EXTRA_DIST= rrdtool.dsp rrdtool.dsw
Modified: trunk/orca/packages/rrdtool-1.0.7.2/src/parsetime.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/parsetime.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/parsetime.c Sat Jul 13 19:22:36 2002
@@ -1,4 +1,4 @@
-/*
+/*
* parsetime.c - parse time for at(1)
* Copyright (C) 1993, 1994 Thomas Koenig
*
@@ -9,6 +9,8 @@
* (including the new syntax being useful for RRDB)
* Copyright (C) 1999 Oleg Cherevko (aka Olwi Deer)
*
+ * severe structural damage inflicted by Tobi Oetiker in 1999
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -45,12 +47,13 @@
* TIME-REFERENCE ::= NOW | TIME-OF-DAY-SPEC [ DAY-SPEC-1 ] |
* [ TIME-OF-DAY-SPEC ] DAY-SPEC-2
*
- * TIME-OF-DAY-SPEC ::= NUMBER [(':'|'.') NUMBER] [am|pm] | # HH:MM HH.MM HH[MM]
+ * TIME-OF-DAY-SPEC ::= NUMBER (':') NUMBER [am|pm] | # HH:MM
* 'noon' | 'midnight' | 'teatime'
*
* DAY-SPEC-1 ::= NUMBER '/' NUMBER '/' NUMBER | # MM/DD/[YY]YY
* NUMBER '.' NUMBER '.' NUMBER | # DD.MM.[YY]YY
- * NUMBER # DDMM[YY]YY
+ * NUMBER # Seconds since 1970
+ * NUMBER # YYYYMMDD
*
* DAY-SPEC-2 ::= MONTH-NAME NUMBER [NUMBER] | # Month DD [YY]YY
* 'yesterday' | 'today' | 'tomorrow' |
@@ -106,17 +109,10 @@
/* System Headers */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <time.h>
-#include <ctype.h>
-#include <stdarg.h>
-
/* Local headers */
-#include "parsetime.h"
+#include "rrd_tool.h"
+#include <stdarg.h>
/* Structures and unions */
@@ -240,7 +236,7 @@
static char *sct; /* scanner - next char pointer in current argument */
static int need; /* scanner - need to advance to next argument */
-static char *sc_token; /* scanner - token buffer */
+static char *sc_token=NULL; /* scanner - token buffer */
static size_t sc_len; /* scanner - lenght of token buffer */
static int sc_tokid; /* scanner - token id */
@@ -264,6 +260,8 @@
* should return TIME_OK (aka NULL) or pointer to the error message,
* and should be called like this: try(func(args));
*
+ * if the try is not successful it will reset the token pointer ...
+ *
* [NOTE: when try(...) is used as the only statement in the "if-true"
* part of the if statement that also has an "else" part it should be
* either enclosed in the curly braces (despite the fact that it looks
@@ -285,7 +283,6 @@
*/
#define panic(e) { \
- EnsureMemFree(); \
return (e); \
}
@@ -316,6 +313,33 @@
return( err );
}
+/* Compare S1 and S2, ignoring case, returning less than, equal to or
+ greater than zero if S1 is lexicographically less than,
+ equal to or greater than S2. -- copied from GNU libc*/
+static int
+mystrcasecmp (s1, s2)
+ const char *s1;
+ const char *s2;
+{
+ const unsigned char *p1 = (const unsigned char *) s1;
+ const unsigned char *p2 = (const unsigned char *) s2;
+ unsigned char c1, c2;
+
+ if (p1 == p2)
+ return 0;
+
+ do
+ {
+ c1 = tolower (*p1++);
+ c2 = tolower (*p2++);
+ if (c1 == '\0')
+ break;
+ }
+ while (c1 == c2);
+
+ return c1 - c2;
+}
+
/*
* parse a token, checking if it's something special to us
*/
@@ -325,7 +349,7 @@
int i;
for (i=0; Specials[i].name != NULL; i++)
- if (strcasecmp(Specials[i].name, arg) == 0)
+ if (mystrcasecmp(Specials[i].name, arg) == 0)
return sc_tokid = Specials[i].value;
/* not special - must be some random id */
@@ -333,6 +357,7 @@
} /* parse_token */
+
/*
* init_scanner() sets up the scanner to eat arguments
*/
@@ -346,7 +371,7 @@
while (argc-- > 0)
sc_len += strlen(*argv++);
- sc_token = (char *) malloc(sc_len);
+ sc_token = (char *) malloc(sc_len*sizeof(char));
if( sc_token == NULL )
return "Failed to allocate memory";
need_to_free = 1;
@@ -362,7 +387,7 @@
int idx;
while (1) {
- memset(sc_token, 0, sc_len);
+ memset(sc_token, '\0', sc_len);
sc_tokid = EOF;
idx = 0;
@@ -381,7 +406,7 @@
* we'll continue, which puts us up at the top of the while loop
* to fetch the next argument in
*/
- while (isspace(*sct) || *sct == '_' || *sct == ',' )
+ while (isspace((unsigned char)*sct) || *sct == '_' || *sct == ',' )
++sct;
if (!*sct) {
need = 1;
@@ -394,16 +419,16 @@
/* then see what it is
*/
- if (isdigit(sc_token[0])) {
- while (isdigit(*sct))
+ if (isdigit((unsigned char)(sc_token[0]))) {
+ while (isdigit((unsigned char)(*sct)))
sc_token[++idx] = *sct++;
- sc_token[++idx] = 0;
+ sc_token[++idx] = '\0';
return sc_tokid = NUMBER;
}
- else if (isalpha(sc_token[0])) {
- while (isalpha(*sct))
+ else if (isalpha((unsigned char)(sc_token[0]))) {
+ while (isalpha((unsigned char)(*sct)))
sc_token[++idx] = *sct++;
- sc_token[++idx] = 0;
+ sc_token[++idx] = '\0';
return parse_token(sc_token);
}
else switch(sc_token[0]) {
@@ -412,7 +437,10 @@
case '+': return sc_tokid = PLUS;
case '-': return sc_tokid = MINUS;
case '/': return sc_tokid = SLASH;
- default: return sc_tokid = JUNK;
+ default:
+ /*OK, we did not make it ... */
+ sct--;
+ return sc_tokid = EOF;
}
} /* while (1) */
} /* token */
@@ -477,8 +505,8 @@
break;
default:
- if( delta < 25 ) /* it may be some other value but in the context
- * of RRD who needs less than 25 min deltas? */
+ if( delta < 6 ) /* it may be some other value but in the context
+ * of RRD who needs less than 6 min deltas? */
sc_tokid = MONTHS;
else
sc_tokid = MINUTES;
@@ -496,7 +524,7 @@
delta *= 7;
/* FALLTHRU */
case DAYS:
- ptv->tm.tm_mday += (op == PLUS) ? delta : -delta;
+ ptv->tm.tm_mday += (op == PLUS) ? delta : -delta;
return TIME_OK;
case HOURS:
ptv->offset += (op == PLUS) ? delta*60*60 : -delta*60*60;
@@ -507,6 +535,9 @@
case SECONDS:
ptv->offset += (op == PLUS) ? delta : -delta;
return TIME_OK;
+ default: /*default unit is seconds */
+ ptv->offset += (op == PLUS) ? delta : -delta;
+ return TIME_OK;
}
panic(e("well-known time unit expected after %d", delta));
/* NORETURN */
@@ -522,32 +553,39 @@
{
int hour, minute = 0;
int tlen;
+ /* save token status in case we must abort */
+ int scc_sv = scc;
+ char *sct_sv = sct;
+ int sc_tokid_sv = sc_tokid;
- hour = atoi(sc_token);
tlen = strlen(sc_token);
+
+ /* first pick out the time of day - we assume an HH:MM time
+ */
+ if (tlen > 2) {
+ return TIME_OK;
+ }
+
+ hour = atoi(sc_token);
- /* first pick out the time of day - if it's 4 digits, we assume
- * a HHMM time, otherwise it's HH (COLON|DOT) MM time
- */
token();
- if (sc_tokid == COLON || sc_tokid == DOT) {
+ if (sc_tokid == SLASH || sc_tokid == DOT) {
+ /* guess we are looking at a date */
+ scc = scc_sv;
+ sct = sct_sv;
+ sc_tokid = sc_tokid_sv;
+ sprintf (sc_token,"%d", hour);
+ return TIME_OK;
+ }
+ if (sc_tokid == COLON ) {
try(expect(NUMBER,
- "Parsing HH%cMM syntax, expecting MM as number, got none",
- sc_tokid == DOT ? '.' : ':' ));
+ "Parsing HH:MM syntax, expecting MM as number, got none"));
minute = atoi(sc_token);
if (minute > 59) {
- panic(e("parsing HH%cMM syntax, got MM = %d (>59!)",
- sc_tokid == DOT ? '.' : ':', minute ));
+ panic(e("parsing HH:MM syntax, got MM = %d (>59!)", minute ));
}
token();
}
- else if (tlen == 4) {
- minute = hour%100;
- if (minute > 59) {
- panic(e("parsing HHMM syntax, got MM = %d (>59!)", minute ));
- }
- hour = hour/100;
- }
/* check if an AM or PM specifier was given
*/
@@ -563,9 +601,14 @@
hour = 0;
}
token();
- }
+ }
else if (hour > 23) {
- panic(e("the time-of-day hour > 23"));
+ /* guess it was not a time then ... */
+ scc = scc_sv;
+ sct = sct_sv;
+ sc_tokid = sc_tokid_sv;
+ sprintf (sc_token,"%d", hour);
+ return TIME_OK;
}
ptv->tm.tm_hour = hour;
ptv->tm.tm_min = minute;
@@ -615,7 +658,7 @@
static char *
day(struct time_value *ptv)
{
- long mday, wday, mon, year = ptv->tm.tm_year;
+ long mday=0, wday, mon, year = ptv->tm.tm_year;
int tlen;
switch (sc_tokid) {
@@ -665,48 +708,49 @@
*/
case NUMBER:
- /* get numeric DDMM[YY]YY, MM/DD/[YY]YY, or DD.MM.[YY]YY
+ /* get numeric <sec since 1970>, MM/DD/[YY]YY, or DD.MM.[YY]YY
*/
tlen = strlen(sc_token);
mon = atol(sc_token);
- token();
-
- if (sc_tokid == SLASH || sc_tokid == DOT) {
- int sep;
+ if (mon > 10*356*24*60*60) {
+ ptv->tm=*localtime(&mon);
+ token();
+ break;
+ }
+ if (mon > 19700101 && mon < 24000101){ /* works between 1970 and 2400 */
+ char cmon[3],cmday[3],cyear[5];
+ strncpy(cyear,sc_token,4);cyear[4]='\0';
+ year = atol(cyear);
+ strncpy(cmon,&(sc_token[4]),2);cmon[2]='\0';
+ mon = atol(cmon);
+ strncpy(cmday,&(sc_token[6]),2);cmday[2]='\0';
+ mday = atol(cmday);
+ token();
+ } else {
+ token();
+
+ if (mon <= 31 && (sc_tokid == SLASH || sc_tokid == DOT)) {
+ int sep;
sep = sc_tokid;
try(expect(NUMBER,"there should be %s number after '%c'",
sep == DOT ? "month" : "day", sep == DOT ? '.' : '/'));
mday = atol(sc_token);
if (token() == sep) {
- try(expect(NUMBER,"there should be year number after '%c'",
- sep == DOT ? '.' : '/'));
- year = atol(sc_token);
- token();
+ try(expect(NUMBER,"there should be year number after '%c'",
+ sep == DOT ? '.' : '/'));
+ year = atol(sc_token);
+ token();
}
-
+
/* flip months and days for european timing
*/
if (sep == DOT) {
- long x = mday;
- mday = mon;
- mon = x;
- }
- }
- else if (tlen == 6 || tlen == 8) {
- if (tlen == 8) {
- year = (mon % 10000) - 1900;
- mon /= 10000;
+ long x = mday;
+ mday = mon;
+ mon = x;
}
- else {
- year = mon % 100;
- mon /= 100;
- }
- mday = mon / 100;
- mon %= 100;
- }
- else {
- panic(e("can't parse your date as DDMM[YY]YY"));
+ }
}
mon--;
@@ -715,13 +759,12 @@
}
if(mday < 1 || mday > 31) {
panic(e("I'm afraid that %d is not a valid day of the month",
- mday));
- }
-
+ mday));
+ }
try(assign_date(ptv, mday, mon, year));
break;
} /* case */
- return TIME_OK;
+ return TIME_OK;
} /* month */
@@ -752,7 +795,7 @@
ptv->type = ABSOLUTE_TIME;
ptv->offset = 0;
ptv->tm = *localtime(&now);
- ptv->tm.tm_isdst = -1; /* ?not needed? */
+ ptv->tm.tm_isdst = -1; /* mktime can figure this out for us ... */
token();
switch (sc_tokid) {
@@ -784,15 +827,21 @@
}
else
if( sc_tokid != EOF ) {
- panic(e("'now' can be followed only be +|- offset"));
+ panic(e("if 'now' is followed by a token it must be +|- offset"));
}
};
break;
/* Only absolute time specifications below */
case NUMBER:
- try(tod(ptv));
- try(day(ptv));
+ try(tod(ptv))
+ if (sc_tokid != NUMBER) break;
+ /* fix month parsing */
+ case JAN: case FEB: case MAR: case APR: case MAY: case JUN:
+ case JUL: case AUG: case SEP: case OCT: case NOV: case DEC:
+ try(day(ptv));
+ if (sc_tokid != NUMBER) break;
+ try(tod(ptv))
break;
/* evil coding for TEATIME|NOON|MIDNIGHT - we've initialised
@@ -815,11 +864,11 @@
}
ptv->tm.tm_hour = hr;
ptv->tm.tm_min = 0;
+ ptv->tm.tm_sec = 0;
token();
- /* fall through to month setting */
- /* FALLTHRU */
+ break;
default:
- try(day(ptv));
+ panic(e("unparsable time: %s%s",sc_token,sct));
break;
} /* ugly case statement */
@@ -843,7 +892,7 @@
/* now we should be at EOF */
if( sc_tokid != EOF ) {
- panic(e("unparsable trailing text: '...%s%s'", sc_token, sct ));
+ panic(e("unparsable trailing text: '...%s%s'", sc_token, sct));
}
ptv->tm.tm_isdst = -1; /* for mktime to guess DST status */
@@ -853,5 +902,60 @@
/* when winter -> summer time correction eats a hour */
panic(e("the specified time is incorrect (out of range?)"));
}
+ EnsureMemFree();
return TIME_OK;
} /* parsetime */
+
+
+int proc_start_end (struct time_value *start_tv,
+ struct time_value *end_tv,
+ time_t *start,
+ time_t *end){
+ if (start_tv->type == RELATIVE_TO_END_TIME && /* same as the line above */
+ end_tv->type == RELATIVE_TO_START_TIME) {
+ rrd_set_error("the start and end times cannot be specified "
+ "relative to each other");
+ return -1;
+ }
+
+ if (start_tv->type == RELATIVE_TO_START_TIME) {
+ rrd_set_error("the start time cannot be specified relative to itself");
+ return -1;
+ }
+
+ if (end_tv->type == RELATIVE_TO_END_TIME) {
+ rrd_set_error("the end time cannot be specified relative to itself");
+ return -1;
+ }
+
+ if( start_tv->type == RELATIVE_TO_END_TIME) {
+ struct tm tmtmp;
+ *end = mktime(&(end_tv->tm)) + end_tv->offset;
+ tmtmp = *localtime(end); /* reinit end including offset */
+ tmtmp.tm_mday += start_tv->tm.tm_mday;
+ tmtmp.tm_mon += start_tv->tm.tm_mon;
+ tmtmp.tm_year += start_tv->tm.tm_year;
+ *start = mktime(&tmtmp) + start_tv->offset;
+ } else {
+ *start = mktime(&(start_tv->tm)) + start_tv->offset;
+ }
+ if (end_tv->type == RELATIVE_TO_START_TIME) {
+ struct tm tmtmp;
+ *start = mktime(&(start_tv->tm)) + start_tv->offset;
+ tmtmp = *localtime(start);
+ tmtmp.tm_mday += end_tv->tm.tm_mday;
+ tmtmp.tm_mon += end_tv->tm.tm_mon;
+ tmtmp.tm_year += end_tv->tm.tm_year;
+ *end = mktime(&tmtmp) + end_tv->offset;
+ } else {
+ *end = mktime(&(end_tv->tm)) + end_tv->offset;
+ }
+ return 0;
+} /* proc_start_end */
+
+
+
+
+
+
+
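
Pulling the parsetime.c changes together, a caller is expected to run each time spec through parsetime() and then resolve the two results against each other with the new proc_start_end(). The sketch below is not from this commit; it assumes it is compiled inside src/ so rrd_tool.h and the library are available, and the "end-1day"/"now" specs are only examples of the at-style syntax documented at the top of parsetime.c.

    #include <stdio.h>
    #include <time.h>
    #include "rrd_tool.h"   /* struct time_value, parsetime(), proc_start_end() */

    int main(void)
    {
        struct time_value start_tv, end_tv;
        time_t start, end;
        char startspec[] = "end-1day";
        char endspec[]   = "now";
        char *err;

        /* parse both specs; parsetime() returns NULL (TIME_OK) on success */
        if ((err = parsetime(startspec, &start_tv)) != NULL ||
            (err = parsetime(endspec, &end_tv)) != NULL) {
            fprintf(stderr, "time parse error: %s\n", err);
            return 1;
        }
        /* resolve start/end relative to each other */
        if (proc_start_end(&start_tv, &end_tv, &start, &end) == -1) {
            fprintf(stderr, "ERROR: %s\n", rrd_get_error());
            return 1;
        }
        printf("start=%ld end=%ld\n", (long)start, (long)end);
        return 0;
    }
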
Modified: trunk/orca/packages/rrdtool-1.0.7.2/src/rrdtool.dsp
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/rrdtool.dsp (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/rrdtool.dsp Sat Jul 13 19:22:36 2002
@@ -42,7 +42,7 @@
# PROP Ignore_Export_Lib 0
# PROP Target_Dir ""
# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
-# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "..\gd1.2" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /D "_CTYPE_DISABLE_MACROS" /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "..\gd1.3" /I "..\libpng-1.0.3" /I "..\zlib-1.1.3" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /D "_CTYPE_DISABLE_MACROS" /FD /c
# SUBTRACT CPP /YX
# ADD BASE RSC /l 0x100c /d "NDEBUG"
# ADD RSC /l 0x100c /d "NDEBUG"
@@ -51,7 +51,7 @@
# ADD BSC32 /nologo
LINK32=link.exe
# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
-# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib ..\gd1.2\release\gd.lib release\rrd.lib /nologo /subsystem:console /incremental:yes /debug /machine:I386
+# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib ..\gd1.3\release\gd.lib release\rrd.lib /nologo /subsystem:console /incremental:yes /debug /machine:I386
!ELSEIF "$(CFG)" == "rrdtool - Win32 Debug"
@@ -67,7 +67,7 @@
# PROP Ignore_Export_Lib 0
# PROP Target_Dir ""
# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
-# ADD CPP /nologo /MTd /W3 /Gm /GX /ZI /Od /I "." /I "..\gd1.2" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /D "_CTYPE_DISABLE_MACROS" /FR /FD /c
+# ADD CPP /nologo /MTd /W3 /Gm /GX /ZI /Od /I "." /I "..\gd1.3" /I "..\libpng-1.0.3" /I "..\zlib-1.1.3" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /D "_CTYPE_DISABLE_MACROS" /FR /FD /c
# SUBTRACT CPP /YX
# ADD BASE RSC /l 0x100c /d "_DEBUG"
# ADD RSC /l 0x100c /d "_DEBUG"
@@ -76,7 +76,7 @@
# ADD BSC32 /nologo /o"rrdtool.bsc"
LINK32=link.exe
# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
-# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib ..\gd1.2\debug\gd.lib debug\rrd.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib ..\gd1.3\debug\gd.lib debug\rrd.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
!ENDIF
Modified: trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_tool.h
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_tool.h (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_tool.h Sat Jul 13 19:22:36 2002
@@ -1,5 +1,5 @@
/*****************************************************************************
- * RRDTOOL 0.99.31 Copyright Tobias Oetiker, 1997,1998, 1999
+ * RRDtool Copyright Tobias Oetiker, 1997,1998, 1999
*****************************************************************************
* rrd_tool.h Common Header File
*****************************************************************************
@@ -22,6 +22,14 @@
#endif
#endif
+#ifdef MUST_DISABLE_SIGFPE
+#include <signal.h>
+#endif
+
+#ifdef MUST_DISABLE_FPMASK
+#include <floatingpoint.h>
+#endif
+
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
@@ -50,9 +58,8 @@
#endif /* __svr4__ && __sun__ */
#endif
-#ifdef WANT_AT_STYLE_TIMESPEC
#include "parsetime.h"
-#endif
+int proc_start_end (struct time_value *, struct time_value *, time_t *, time_t *);
#ifndef WIN32
@@ -67,7 +74,8 @@
#include <float.h> /* for _isnan */
#define isnan _isnan
-
+#define finite _finite
+#define isinf(a) (_fpclass(a) == _FPCLASS_NINF || _fpclass(a) == _FPCLASS_PINF)
#endif
/* local include files -- need to be after the system ones */
@@ -91,6 +99,7 @@
int rrd_fetch(int argc, char **argv,
time_t *start, time_t *end, unsigned long *step,
unsigned long *ds_cnt, char ***ds_namv, rrd_value_t **data);
+int rrd_restore(int argc, char **argv);
int rrd_dump(int argc, char **argv);
int rrd_tune(int argc, char **argv);
time_t rrd_last(int argc, char **argv);
@@ -102,7 +111,11 @@
int rrd_test_error(void);
char *rrd_get_error(void);
int LockRRD(FILE *);
-
+int GifSize(FILE *, long *, long *);
+int PngSize(FILE *, long *, long *);
+int PngSize(FILE *, long *, long *);
+#include <gd.h>
+void gdImagePng(gdImagePtr im, FILE *out);
int rrd_create_fn(char *file_name, rrd_t *rrd);
int rrd_fetch_fn(char *filename, enum cf_en cf_idx,
time_t *start,time_t *end,
@@ -114,6 +127,7 @@
void rrd_init(rrd_t *rrd);
int rrd_open(char *file_name, FILE **in_file, rrd_t *rrd, int rdwr);
+int readfile(char *file, char **buffer, int skipfirst);
#define RRD_READONLY 0
Modified: trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_update.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_update.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_update.c Sat Jul 13 19:22:36 2002
@@ -1,5 +1,5 @@
/*****************************************************************************
- * RRDTOOL 0.99.31 Copyright Tobias Oetiker, 1997, 1998, 1999
+ * RRDtool Copyright Tobias Oetiker, 1997, 1998, 1999
*****************************************************************************
* rrd_update.c RRD Update Function
*****************************************************************************
@@ -15,8 +15,6 @@
#include <sys/locking.h>
#include <sys/stat.h>
#include <io.h>
-#else
- #include <unistd.h>
#endif
@@ -39,6 +37,9 @@
* area in the rrd file. this
* pointer changes as each rrd is
* processed. */
+ unsigned long rra_current; /* byte pointer to the current write
+ * spot in the rrd file. */
+ unsigned long rra_pos_tmp; /* temporary byte pointer. */
unsigned long interval,
pre_int,post_int; /* interval between this and
* the last run */
@@ -63,7 +64,7 @@
long *tmpl_idx; /* index representing the settings
transported by the template index */
- long tmpl_max = 0;
+ long tmpl_cnt = 2; /* time and data */
FILE *rrd_file;
rrd_t rrd;
@@ -72,7 +73,6 @@
int wrote_to_file = 0;
char *template = NULL;
- rrd_init(&rrd);
while (1) {
static struct option long_options[] =
{
@@ -108,8 +108,7 @@
if(rrd_open(argv[optind],&rrd_file,&rrd, RRD_READWRITE)==-1){
return -1;
}
- rra_begin=ftell(rrd_file);
- rra_start=rra_begin;
+ rra_current = rra_start = rra_begin = ftell(rrd_file);
/* get exclusive lock to whole file.
* lock gets removed when we close the file.
@@ -147,27 +146,35 @@
return(-1);
}
/* initialize template redirector */
+
+ /* the first entry [0] points to zero as the first entry in an
+ update argument is always the time. The remaining entries point
+ to the index number of their respective data source. */
for (i=0;i<=rrd.stat_head->ds_cnt;i++) tmpl_idx[i]=i;
- tmpl_max=rrd.stat_head->ds_cnt;
+ tmpl_cnt=rrd.stat_head->ds_cnt+1;
if (template) {
char *dsname;
- long temp_len;
+ int tmpl_len;
dsname = template;
- temp_len = strlen(template);
- tmpl_max = 0;
- for (i=0;i<=temp_len;i++) {
- if (template[i] == ':' || template[i] =='\0') {
+ template++;
+ tmpl_cnt = 1; /* the first entry is the time */
+ tmpl_len = strlen(template);
+ for(i=0;i<=tmpl_len;i++) {
+ if (template[i] == ':' || template[i] == '\0') {
template[i] = '\0';
- if ((tmpl_idx[++tmpl_max] = ds_match(&rrd,dsname)) == -1){ free(updvals);
- free(pdp_temp);
- free(tmpl_idx);
- rrd_free(&rrd);
- fclose(rrd_file);
- return(-1);
+ if ((tmpl_idx[tmpl_cnt++] = ds_match(&rrd,dsname)) == -1){
+ free(updvals); free(pdp_temp);
+ free(tmpl_idx); rrd_free(&rrd);
+ fclose(rrd_file); return(-1);
}
/* the first element is always the time */
- tmpl_idx[tmpl_max]++;
+ tmpl_idx[tmpl_cnt-1]++;
+
+ /* go to the next entry on the template */
+ dsname = &template[i+1];
+
}
+
}
}
if ((pdp_new = malloc(sizeof(rrd_value_t)
@@ -183,35 +190,27 @@
/* loop through the arguments. */
for(arg_i=optind+1; arg_i<argc;arg_i++) {
- unsigned long count_colons = 0;
-
-/* fprintf(stderr, "Handling %s\t%s\n", argv[arg_i]); */
- /* parse the update line ... the first value is the time, the
- remaining ones are the datasources */
- /* skip any initial :'s */
- i=0;
- while (argv[arg_i][i] && argv[arg_i][i] == ':')
- ++i;
-
+ char *stepper;
/* initialize all ds input to unknown except the first one
which has always got to be set */
- for(ii=1;ii<=rrd.stat_head->ds_cnt;ii++)
- updvals[ii] = "U";
+ for(ii=1;ii<=rrd.stat_head->ds_cnt;ii++) updvals[ii] = "U";
ii=0;
- updvals[ii++] = &argv[arg_i][i];
- for (;argv[arg_i][i] != '\0';++i) {
- if (argv[arg_i][i] == ':') {
- count_colons++;
- argv[arg_i][i] = '\0';
- if (ii<=tmpl_max) {
- updvals[tmpl_idx[ii++]] = &argv[arg_i][i+1];
+ stepper = argv[arg_i];
+ updvals[0]=stepper;
+ while (*stepper) {
+ if (*stepper == ':') {
+ *stepper = '\0';
+ ii++;
+ if (ii<tmpl_cnt){
+ updvals[tmpl_idx[ii]] = stepper+1;
}
}
+ stepper++;
}
- if (ii-1 != count_colons || ii-1 != tmpl_max) {
- rrd_set_error("expected %lu data source readings (got %lu) from %s...",
- tmpl_max, count_colons, argv[arg_i]);
+ if (ii+1 != tmpl_cnt) {
+ rrd_set_error("expected %lu data source readings (got %lu) from %s:...",
+ tmpl_cnt-1, ii, argv[arg_i]);
break;
}
@@ -231,11 +230,13 @@
/* seek to the beginning of the rrd's */
- if(fseek(rrd_file, rra_begin, SEEK_SET) != 0) {
- rrd_set_error("seek error in rrd");
- break;
+ if (rra_current != rra_begin) {
+ if(fseek(rrd_file, rra_begin, SEEK_SET) != 0) {
+ rrd_set_error("seek error in rrd");
+ break;
+ }
+ rra_current = rra_begin;
}
-
rra_start = rra_begin;
/* when was the current pdp started */
@@ -335,7 +336,7 @@
if(dst_idx == DST_COUNTER || dst_idx == DST_DERIVE){
strncpy(rrd.pdp_prep[i].last_ds,
updvals[i+1],LAST_DS_LEN-1);
- rrd.pdp_prep[i].last_ds[LAST_DS_LEN]=0;
+ rrd.pdp_prep[i].last_ds[LAST_DS_LEN-1]='\0';
}
}
/* break out of the argument parsing loop if the error_string is set */
@@ -451,13 +452,15 @@
#ifdef DEBUG
fprintf(stderr," -- RRA Preseek %ld\n",ftell(rrd_file));
#endif
-
- if(fseek(rrd_file,
- (rra_start + (rrd.stat_head->ds_cnt
- *rrd.rra_ptr[i].cur_row
- * sizeof(rrd_value_t))), SEEK_SET) != 0){
- rrd_set_error("seek error in rrd");
- break;
+ /* determine if a seek is even needed. */
+ rra_pos_tmp = rra_start +
+ rrd.stat_head->ds_cnt*rrd.rra_ptr[i].cur_row*sizeof(rrd_value_t);
+ if(rra_pos_tmp != rra_current) {
+ if(fseek(rrd_file, rra_pos_tmp, SEEK_SET) != 0){
+ rrd_set_error("seek error in rrd");
+ break;
+ }
+ rra_current = rra_pos_tmp;
}
#ifdef DEBUG
fprintf(stderr," -- RRA Postseek %ld\n",ftell(rrd_file));
@@ -562,6 +565,7 @@
rrd_set_error("writing rrd");
break;
}
+ rra_current += sizeof(rrd_value_t);
wrote_to_file = 1;
#ifdef DEBUG
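
To make the new --template bookkeeping above easier to follow: tmpl_idx[0] always refers to the time field, and each later slot stores 1 + the index of the matched data source, so a colon-separated update line can be scattered into updvals[] in template order. The toy program below mimics that mapping; the data source names and the inline ds_match() stand-in are invented for illustration and it is not code from this commit.

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* pretend the RRD holds these data sources, in file order */
        const char *ds_nam[] = { "in", "out", "errors" };
        long ds_cnt = 3;

        /* template "out:in" means update lines arrive as time:out:in */
        char template[] = "out:in";
        long tmpl_idx[4];           /* room for the time plus ds_cnt entries */
        long tmpl_cnt = 1;          /* entry 0 is always the time */
        char *dsname = template;
        char *p = template;
        long i;

        tmpl_idx[0] = 0;
        for (;;) {
            if (*p == ':' || *p == '\0') {
                int last = (*p == '\0');
                *p = '\0';
                for (i = 0; i < ds_cnt; i++)          /* ds_match() stand-in */
                    if (strcmp(dsname, ds_nam[i]) == 0)
                        tmpl_idx[tmpl_cnt++] = i + 1; /* +1: slot 0 is the time */
                dsname = p + 1;
                if (last)
                    break;
            }
            p++;
        }

        for (i = 0; i < tmpl_cnt; i++)
            printf("tmpl_idx[%ld] = %ld\n", i, tmpl_idx[i]);
        /* prints 0, 2, 1: "out" is data source 1 -> slot value 2, "in" is 0 -> 1 */
        return 0;
    }
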
Added: trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_restore.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_restore.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/src/rrd_restore.c Sat Jul 13 19:22:36 2002
@@ -0,0 +1,369 @@
+/*****************************************************************************
+ * RRDtool Copyright Tobias Oetiker, 1997, 1998, 1999
+ *****************************************************************************
+ * rrd_restore.c creates new rrd from data dumped by rrd_dump.c
+ *****************************************************************************/
+
+#include "rrd_tool.h"
+
+/* convert all occurrences of <BlaBlaBla> to <blablabla> */
+
+void xml_lc(char* buf){
+ int intag=0;
+ while((*buf)){
+ if (intag ==0 && (*buf) == '<') {
+ intag = 1;
+ }
+ else if (intag ==1 && (*buf) == '>') {
+ intag = 0;
+ continue;
+ } else if (intag ==1) {
+ *buf = tolower(*buf);
+ }
+ buf++;
+ }
+}
+
+int skip(char **buf){
+ char *ptr;
+ ptr=(*buf);
+ do {
+ (*buf)=ptr;
+ while((*(ptr+1)) && ((*ptr)==' ' || (*ptr)=='\r' || (*ptr)=='\n' || (*ptr)=='\t')) ptr++;
+ if (strncmp(ptr,"<!--",4) == 0) {
+ ptr= strstr(ptr,"-->");
+ if (ptr) ptr+=3; else {
+ rrd_set_error("Dangling Comment");
+ (*buf) = NULL;
+ return -1;
+ }
+ }
+ } while ((*buf)!=ptr);
+ return 1;
+}
+
+int eat_tag(char **buf, char *tag){
+ if ((*buf)==NULL) return -1; /* fall-through clause */
+
+ rrd_clear_error();
+ skip(buf);
+ if ((**buf)=='<'
+ && strncmp((*buf)+1,tag,strlen(tag)) == 0
+ && *((*buf)+strlen(tag)+1)=='>') {
+ (*buf) += strlen(tag)+2;
+ }
+ else {
+ rrd_set_error("No <%s> tag found",tag);
+ (*buf) = NULL;
+ return -1;
+ }
+ skip(buf);
+ return 1;
+}
+
+int read_tag(char **buf, char *tag, char *format, void *value){
+ char *end_tag;
+ int matches;
+ if ((*buf)==NULL) return -1; /* fall-through clause */
+ rrd_clear_error();
+ if (eat_tag(buf,tag)==1){
+ char *temp;
+ temp = (*buf);
+ while(*((*buf)+1) && (*(*buf) != '<')) (*buf)++; /*find start of endtag*/
+ *(*buf) = '\0';
+ matches =sscanf(temp,format,value);
+ *(*buf) = '<';
+ end_tag = malloc((strlen(tag)+2)*sizeof(char));
+ sprintf(end_tag,"/%s",tag);
+ eat_tag(buf,end_tag);
+ free(end_tag);
+ if (matches == 0 && strcmp(format,"%lf") == 0)
+ (*((double* )(value))) = DNAN;
+ if (matches != 1) return 0;
+ return 1;
+ }
+ return -1;
+}
+
+
+/* parse the data stored in buf and return a filled rrd structure */
+int xml2rrd(char* buf, rrd_t* rrd, char rc){
+ /* pass 1 identify number of RRAs */
+ char *ptr,*ptr2,*ptr3; /* walks through the buffer */
+ long rows=0,mempool=0,i=0;
+ xml_lc(buf); /* lets lowercase all active parts of the xml */
+ ptr=buf;
+ ptr2=buf;
+ ptr3=buf;
+ /* start with an RRD tag */
+
+ eat_tag(&ptr,"rrd");
+ /* allocate static header */
+ if((rrd->stat_head = calloc(1,sizeof(stat_head_t)))==NULL){
+ rrd_set_error("allocating rrd.stat_head");
+ return -1;
+ };
+
+ strcpy(rrd->stat_head->cookie,RRD_COOKIE);
+ read_tag(&ptr,"version","%4[0-9]",rrd->stat_head->version);
+ rrd->stat_head->float_cookie = FLOAT_COOKIE;
+ rrd->stat_head->ds_cnt = 0;
+ rrd->stat_head->rra_cnt = 0;
+ read_tag(&ptr,"step","%lu",&(rrd->stat_head->pdp_step));
+
+ /* allocate live head */
+ if((rrd->live_head = calloc(1,sizeof(live_head_t)))==NULL){
+ rrd_set_error("allocating rrd.live_head");
+ return -1;
+ }
+ read_tag(&ptr,"lastupdate","%lu",&(rrd->live_head->last_up));
+
+ /* Data Source Definition Part */
+ ptr2 = ptr;
+ while (eat_tag(&ptr2,"ds") == 1){
+ rrd->stat_head->ds_cnt++;
+ if((rrd->ds_def = rrd_realloc(rrd->ds_def,rrd->stat_head->ds_cnt*sizeof(ds_def_t)))==NULL){
+ rrd_set_error("allocating rrd.ds_def");
+ return -1;
+ };
+ /* clean out memory to make sure no data gets stored from previous tasks */
+ memset(&(rrd->ds_def[rrd->stat_head->ds_cnt-1]), 0, sizeof(ds_def_t));
+ if((rrd->pdp_prep = rrd_realloc(rrd->pdp_prep,rrd->stat_head->ds_cnt
+ *sizeof(pdp_prep_t)))==NULL){
+ rrd_set_error("allocating pdp_prep");
+ return(-1);
+ }
+ /* clean out memory to make sure no data gets stored from previous tasks */
+ memset(&(rrd->pdp_prep[rrd->stat_head->ds_cnt-1]), 0, sizeof(pdp_prep_t));
+
+ read_tag(&ptr2,"name",DS_NAM_FMT,rrd->ds_def[rrd->stat_head->ds_cnt-1].ds_nam);
+
+ read_tag(&ptr2,"type",DST_FMT,rrd->ds_def[rrd->stat_head->ds_cnt-1].dst);
+ /* test for valid type */
+ if(dst_conv(rrd->ds_def[rrd->stat_head->ds_cnt-1].dst) == -1) return -1;
+
+ read_tag(&ptr2,"minimal_heartbeat","%lu",
+ &(rrd->ds_def[rrd->stat_head->ds_cnt-1].par[DS_mrhb_cnt].u_cnt));
+ read_tag(&ptr2,"min","%lf",&(rrd->ds_def[rrd->stat_head->ds_cnt-1].par[DS_min_val].u_val));
+ read_tag(&ptr2,"max","%lf",&(rrd->ds_def[rrd->stat_head->ds_cnt-1].par[DS_max_val].u_val));
+
+ read_tag(&ptr2,"last_ds","%30s",rrd->pdp_prep[rrd->stat_head->ds_cnt-1].last_ds);
+ read_tag(&ptr2,"value","%lf",&(rrd->pdp_prep[rrd->stat_head->ds_cnt-1].scratch[PDP_val].u_val));
+ read_tag(&ptr2,"unknown_sec","%lu",&(rrd->pdp_prep[i].scratch[PDP_unkn_sec_cnt].u_cnt));
+ eat_tag(&ptr2,"/ds");
+ ptr=ptr2;
+ }
+
+ ptr2 = ptr;
+ while (eat_tag(&ptr2,"rra") == 1){
+ long i;
+ rrd->stat_head->rra_cnt++;
+
+ /* allocate and reset rra definition areas */
+ if((rrd->rra_def = rrd_realloc(rrd->rra_def,rrd->stat_head->rra_cnt*sizeof(rra_def_t)))==NULL){
+ rrd_set_error("allocating rra_def"); return -1; }
+ memset(&(rrd->rra_def[rrd->stat_head->rra_cnt-1]), 0, sizeof(rra_def_t));
+
+ /* allocate and reset consolidation point areas */
+ if((rrd->cdp_prep = rrd_realloc(rrd->cdp_prep,
+ rrd->stat_head->rra_cnt
+ *rrd->stat_head->ds_cnt*sizeof(cdp_prep_t)))==NULL){
+ rrd_set_error("allocating cdp_prep"); return -1; }
+
+ memset(&(rrd->cdp_prep[rrd->stat_head->ds_cnt*(rrd->stat_head->rra_cnt-1)]),
+ 0, rrd->stat_head->ds_cnt*sizeof(cdp_prep_t));
+
+
+ read_tag(&ptr2,"cf",CF_NAM_FMT,rrd->rra_def[rrd->stat_head->rra_cnt-1].cf_nam);
+ /* test for valid type */
+ if(cf_conv(rrd->rra_def[rrd->stat_head->rra_cnt-1].cf_nam) == -1) return -1;
+
+ read_tag(&ptr2,"pdp_per_row","%lu",&(rrd->rra_def[rrd->stat_head->rra_cnt-1].pdp_cnt));
+
+ eat_tag(&ptr2,"cdp_prep");
+ for(i=0;i<rrd->stat_head->ds_cnt;i++){
+ eat_tag(&ptr2,"ds");
+ read_tag(&ptr2,"value","%lf",&(rrd->cdp_prep[rrd->stat_head->ds_cnt*
+ (rrd->stat_head->rra_cnt-1)
+ +i].scratch[CDP_val].u_val));
+ read_tag(&ptr2,"unknown_datapoints","%lu",&(rrd->cdp_prep[rrd->stat_head->ds_cnt
+ *(rrd->stat_head->rra_cnt-1)
+ +i].scratch[CDP_unkn_pdp_cnt].u_cnt));
+ eat_tag(&ptr2,"/ds");
+ }
+ eat_tag(&ptr2,"/cdp_prep");
+ rrd->rra_def[rrd->stat_head->rra_cnt-1].row_cnt=0;
+ eat_tag(&ptr2,"database");
+ ptr3 = ptr2;
+ while (eat_tag(&ptr3,"row") == 1){
+
+ if(mempool==0){
+ mempool = 1000;
+ if((rrd->rrd_value = rrd_realloc(rrd->rrd_value,
+ (rows+mempool)*(rrd->stat_head->ds_cnt)
+ *sizeof(rrd_value_t)))==NULL) {
+ rrd_set_error("allocating rrd_values"); return -1; }
+ }
+ rows++;
+ mempool--;
+ rrd->rra_def[rrd->stat_head->rra_cnt-1].row_cnt++;
+ for(i=0;i<rrd->stat_head->ds_cnt;i++){
+
+ rrd_value_t * value = &(rrd->rrd_value[(rows-1)*rrd->stat_head->ds_cnt+i]);
+
+ read_tag(&ptr3,"v","%lf", value);
+
+ if (
+ (rc == 1) /* do we have to check for the ranges */
+ &&
+ (!isnan(*value)) /* not a NAN value */
+ &&
+ ( /* min defined and in the range ? */
+ (!isnan(rrd->ds_def[i].par[DS_min_val].u_val)
+ && (*value < rrd->ds_def[i].par[DS_min_val].u_val))
+ || /* max defined and in the range ? */
+ (!isnan(rrd->ds_def[i].par[DS_max_val].u_val)
+ && (*value > rrd->ds_def[i].par[DS_max_val].u_val))
+ )
+ ) {
+ fprintf (stderr, "out of range found [ds: %lu], [value : %0.10e]\n", i, *value);
+ *value = DNAN;
+ }
+ }
+ eat_tag(&ptr3,"/row");
+ ptr2=ptr3;
+ }
+ eat_tag(&ptr2,"/database");
+ eat_tag(&ptr2,"/rra");
+ ptr=ptr2;
+ }
+ eat_tag(&ptr,"/rrd");
+
+ if((rrd->rra_ptr = calloc(1,sizeof(rra_ptr_t)*rrd->stat_head->rra_cnt)) == NULL) {
+ rrd_set_error("allocating rra_ptr");
+ return(-1);
+ }
+
+ for(i=0; i <rrd->stat_head->rra_cnt; i++) {
+ rrd->rra_ptr[i].cur_row = rrd->rra_def[i].row_cnt-1;
+ }
+ if (ptr==NULL)
+ return -1;
+ return 1;
+}
+
+
+
+
+
+/* create an empty rrd file according to the specs given */
+
+int
+rrd_write(char *file_name, rrd_t *rrd)
+{
+ unsigned long i,ii,val_cnt;
+ FILE *rrd_file=NULL;
+
+ if (strcmp("-",file_name)==0){
+ rrd_file = stdout;
+ } else {
+ if ((rrd_file = fopen(file_name,"wb")) == NULL ) {
+ rrd_set_error("can't create '%s'",file_name);
+ rrd_free(rrd);
+ return(-1);
+ }
+ }
+ fwrite(rrd->stat_head,
+ sizeof(stat_head_t), 1, rrd_file);
+
+ fwrite(rrd->ds_def,
+ sizeof(ds_def_t), rrd->stat_head->ds_cnt, rrd_file);
+
+ fwrite(rrd->rra_def,
+ sizeof(rra_def_t), rrd->stat_head->rra_cnt, rrd_file);
+
+ fwrite(rrd->live_head, sizeof(live_head_t),1, rrd_file);
+
+ fwrite( rrd->pdp_prep, sizeof(pdp_prep_t),rrd->stat_head->ds_cnt,rrd_file);
+
+ fwrite( rrd->cdp_prep, sizeof(cdp_prep_t),rrd->stat_head->rra_cnt*
+ rrd->stat_head->ds_cnt,rrd_file);
+ fwrite( rrd->rra_ptr, sizeof(rra_ptr_t), rrd->stat_head->rra_cnt,rrd_file);
+
+
+
+ /* calculate the number of rrd_values to dump */
+ val_cnt=0;
+ for(i=0; i < rrd->stat_head->rra_cnt; i++)
+ for(ii=0; ii < rrd->rra_def[i].row_cnt * rrd->stat_head->ds_cnt;ii++)
+ val_cnt++;
+ fwrite( rrd->rrd_value, sizeof(rrd_value_t),val_cnt,rrd_file);
+
+ /* lets see if we had an error */
+ if(ferror(rrd_file)){
+ rrd_set_error("a file error occurred while creating '%s'",file_name);
+ fclose(rrd_file);
+ return(-1);
+ }
+
+ fclose(rrd_file);
+ return 0;
+}
+
+
+int
+rrd_restore(int argc, char **argv)
+{
+ rrd_t rrd;
+ char *buf;
+ char rc = 0;
+
+ /* init rrd clean */
+ rrd_init(&rrd);
+ if (argc<3) {
+ rrd_set_error("usage rrdtool %s [--range-check/-r] file.xml file.rrd",argv[0]);
+ return -1;
+ }
+
+ while (1) {
+ static struct option long_options[] =
+ {
+ {"range-check", required_argument, 0, 'r'},
+ {0,0,0,0}
+ };
+ int option_index = 0;
+ int opt;
+
+
+ opt = getopt_long(argc, argv, "r", long_options, &option_index);
+
+ if (opt == EOF)
+ break;
+
+ switch(opt) {
+ case 'r':
+ rc=1;
+ break;
+ default:
+ rrd_set_error("usage rrdtool %s [--range-check|-r] file.xml file.rrd",argv[0]);
+ return -1;
+ break;
+ }
+ }
+
+ if (readfile(argv[optind],&buf,0)==-1){
+ return -1;
+ }
+ if (xml2rrd(buf,&rrd,rc)==-1) {
+ rrd_free(&rrd);
+ free(buf);
+ return -1;
+ }
+ free(buf);
+ if(rrd_write(argv[optind+1],&rrd)==-1){
+ rrd_free(&rrd);
+ return -1;
+ };
+ rrd_free(&rrd);
+ return 0;
+}
Deleted: trunk/orca/packages/rrdtool-1.0.7.2/src/rrd.dsw
Added: trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgiGetValue.3
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgiGetValue.3 (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgiGetValue.3 Sat Jul 13 19:22:37 2002
@@ -0,0 +1,50 @@
+.\" cgiInit - Initializes cgi library
+.\" Copyright (c) 1998 Martin Schulze <joey at infodrom.north.de>
+.\"
+.\" This program is free software; you can redistribute it and/or modify
+.\" it under the terms of the GNU General Public License as published by
+.\" the Free Software Foundation; either version 2 of the License, or
+.\" (at your option) any later version.
+.\"
+.\" This program is distributed in the hope that it will be useful,
+.\" but WITHOUT ANY WARRANTY; without even the implied warranty of
+.\" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+.\" GNU General Public License for more details.
+.\"
+.\" You should have received a copy of the GNU General Public License
+.\" along with this program; if not, write to the Free Software
+.\" Foundation, Inc.,59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+.\"
+.TH cgiInit 3 "14 February 1998" "Debian GNU/Linux" "Programmer's Manual"
+.SH NAME
+cgiInit \- Initializes cgi library
+.SH SYNOPSYS
+.nf
+.B #include <cgi.h>
+.sp
+.BI "char *cgiGetValue(s_cgi **" parms ", const char *" var );
+.fi
+.SH DESCRIPTION
+This routine returns a pointer to the value of a CGI variable.
+Encoded characters (%nn) are already decoded. The returned pointer
+must not be freed by the caller.
+
+If
+.B multiple
+fields are used (i.e. a variable that may contain several values) the
+value returned contains all these values concatenated together with a
+newline character as separator.
+
+.SH "RETURN VALUE"
+On success a pointer to a string is returned. If the variable wasn't
+transmitted through CGI or was empty, NULL is returned.
+
+.SH "AUTHOR"
+This cgi library is written by Martin Schulze
+<joey at infodrom.north.de>. If you have additions or improvements
+please get in touch with him.
+
+.SH "SEE ALSO"
+.BR cgiDebug (3),
+.BR cgiHeader (3),
+.BR cgiInit (3).
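
Not part of the manual page itself, but as a quick illustration of the newline-separated multiple values described above, a caller could look roughly like the sketch below. It assumes the cgi.h added elsewhere in this revision; the field name "select" is only an example.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <cgi.h>

    int main(void)
    {
        s_cgi **cgi = cgiInit();
        char *val, *copy, *line;

        cgiHeader();
        if (cgi == NULL || (val = cgiGetValue(cgi, "select")) == NULL) {
            printf("no value submitted\n");
            return 0;
        }

        /* A multiple-selection field comes back as one string with the
         * individual values separated by newlines; the returned pointer
         * itself must not be freed or modified, so work on a copy. */
        if ((copy = strdup(val)) == NULL)
            return 1;
        for (line = strtok(copy, "\n"); line != NULL; line = strtok(NULL, "\n"))
            printf("value: %s<br>\n", line);
        free(copy);
        return 0;
    }
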
Added: trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/Makefile.in
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/Makefile.in (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/Makefile.in Sat Jul 13 19:22:37 2002
@@ -0,0 +1,303 @@
+# Makefile.in generated automatically by automake 1.4 from Makefile.am
+
+# Copyright (C) 1994, 1995-8, 1999 Free Software Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+#AUTOMAKE_OPTIONS = foreign
+
+# our local rules for autoconf (automake and libtool)
+#ACLOCAL_M4 = $(top_srcdir)/config/aclocal.m4
+#AUTOHEADER = @AUTOHEADER@ --localdir=config
+
+
+SHELL = @SHELL@
+
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+VPATH = @srcdir@
+prefix = @prefix@
+exec_prefix = @exec_prefix@
+
+bindir = @bindir@
+sbindir = @sbindir@
+libexecdir = @libexecdir@
+datadir = @datadir@
+sysconfdir = @sysconfdir@
+sharedstatedir = @sharedstatedir@
+localstatedir = @localstatedir@
+libdir = @libdir@
+infodir = @infodir@
+mandir = @mandir@
+includedir = @includedir@
+oldincludedir = /usr/include
+
+DESTDIR =
+
+pkgdatadir = $(datadir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+
+top_builddir = ..
+
+ACLOCAL = @ACLOCAL@
+AUTOCONF = @AUTOCONF@
+AUTOMAKE = @AUTOMAKE@
+AUTOHEADER = @AUTOHEADER@
+
+INSTALL = @INSTALL@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@ $(AM_INSTALL_PROGRAM_FLAGS)
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+transform = @program_transform_name@
+
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_alias = @build_alias@
+build_triplet = @build@
+host_alias = @host_alias@
+host_triplet = @host@
+target_alias = @target_alias@
+target_triplet = @target@
+CC = @CC@
+CFLAGS = @CFLAGS@
+CGI_LIB_DIR = @CGI_LIB_DIR@
+COMP_PERL = @COMP_PERL@
+CPP = @CPP@
+GD_LIB_DIR = @GD_LIB_DIR@
+LIBTOOL = @LIBTOOL@
+PERL = @PERL@
+PNG_LIB_DIR = @PNG_LIB_DIR@
+RANLIB = @RANLIB@
+ZLIB_LIB_DIR = @ZLIB_LIB_DIR@
+
+noinst_LTLIBRARIES = librrd_cgi.la
+
+librrd_cgi_la_SOURCES = cgi.c cgi.h
+
+EXTRA_DIST = *.[1-9] *.dsp *.dsw readme cgitest.c jumpto.c
+mkinstalldirs = $(SHELL) $(top_srcdir)/config/mkinstalldirs
+CONFIG_HEADER = ../config/config.h
+CONFIG_CLEAN_FILES =
+LTLIBRARIES = $(noinst_LTLIBRARIES)
+
+
+DEFS = @DEFS@ -I. -I$(srcdir) -I../config
+CPPFLAGS = @CPPFLAGS@
+LDFLAGS = @LDFLAGS@
+LIBS = @LIBS@
+librrd_cgi_la_LDFLAGS =
+librrd_cgi_la_LIBADD =
+librrd_cgi_la_OBJECTS = cgi.lo
+COMPILE = $(CC) $(DEFS) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --mode=compile $(CC) $(DEFS) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(LDFLAGS) -o $@
+DIST_COMMON = Makefile.am Makefile.in
+
+
+PACKAGE = @PACKAGE@
+VERSION = @VERSION@
+
+DISTFILES = $(DIST_COMMON) $(SOURCES) $(HEADERS) $(TEXINFOS) $(EXTRA_DIST)
+
+TAR = tar
+GZIP_ENV = --best
+SOURCES = $(librrd_cgi_la_SOURCES)
+OBJECTS = $(librrd_cgi_la_OBJECTS)
+
+all: all-redirect
+.SUFFIXES:
+.SUFFIXES: .S .c .lo .o .s
+$(srcdir)/Makefile.in: Makefile.am $(top_srcdir)/configure.in $(ACLOCAL_M4)
+ cd $(top_srcdir) && $(AUTOMAKE) --gnu --include-deps cgilib-0.4/Makefile
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
+
+
+mostlyclean-noinstLTLIBRARIES:
+
+clean-noinstLTLIBRARIES:
+ -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
+
+distclean-noinstLTLIBRARIES:
+
+maintainer-clean-noinstLTLIBRARIES:
+
+.c.o:
+ $(COMPILE) -c $<
+
+.s.o:
+ $(COMPILE) -c $<
+
+.S.o:
+ $(COMPILE) -c $<
+
+mostlyclean-compile:
+ -rm -f *.o core *.core
+
+clean-compile:
+
+distclean-compile:
+ -rm -f *.tab.c
+
+maintainer-clean-compile:
+
+.c.lo:
+ $(LIBTOOL) --mode=compile $(COMPILE) -c $<
+
+.s.lo:
+ $(LIBTOOL) --mode=compile $(COMPILE) -c $<
+
+.S.lo:
+ $(LIBTOOL) --mode=compile $(COMPILE) -c $<
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+
+distclean-libtool:
+
+maintainer-clean-libtool:
+
+librrd_cgi.la: $(librrd_cgi_la_OBJECTS) $(librrd_cgi_la_DEPENDENCIES)
+ $(LINK) $(librrd_cgi_la_LDFLAGS) $(librrd_cgi_la_OBJECTS) $(librrd_cgi_la_LIBADD) $(LIBS)
+
+tags: TAGS
+
+ID: $(HEADERS) $(SOURCES) $(LISP)
+ list='$(SOURCES) $(HEADERS)'; \
+ unique=`for i in $$list; do echo $$i; done | \
+ awk ' { files[$$0] = 1; } \
+ END { for (i in files) print i; }'`; \
+ here=`pwd` && cd $(srcdir) \
+ && mkid -f$$here/ID $$unique $(LISP)
+
+TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) $(LISP)
+ tags=; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS)'; \
+ unique=`for i in $$list; do echo $$i; done | \
+ awk ' { files[$$0] = 1; } \
+ END { for (i in files) print i; }'`; \
+ test -z "$(ETAGS_ARGS)$$unique$(LISP)$$tags" \
+ || (cd $(srcdir) && etags $(ETAGS_ARGS) $$tags $$unique $(LISP) -o $$here/TAGS)
+
+mostlyclean-tags:
+
+clean-tags:
+
+distclean-tags:
+ -rm -f TAGS ID
+
+maintainer-clean-tags:
+
+distdir = $(top_builddir)/$(PACKAGE)-$(VERSION)/$(subdir)
+
+subdir = cgilib-0.4
+
+distdir: $(DISTFILES)
+ @for file in $(DISTFILES); do \
+ d=$(srcdir); \
+ if test -d $$d/$$file; then \
+ cp -pr $$d/$$file $(distdir)/$$file; \
+ else \
+ test -f $(distdir)/$$file \
+ || ln $$d/$$file $(distdir)/$$file 2> /dev/null \
+ || cp -p $$d/$$file $(distdir)/$$file || :; \
+ fi; \
+ done
+cgi.lo cgi.o : cgi.c cgi.h
+
+info-am:
+info: info-am
+dvi-am:
+dvi: dvi-am
+check-am: all-am
+check: check-am
+installcheck-am:
+installcheck: installcheck-am
+install-exec-am:
+install-exec: install-exec-am
+
+install-data-am:
+install-data: install-data-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+install: install-am
+uninstall-am:
+uninstall: uninstall-am
+all-am: Makefile $(LTLIBRARIES)
+all-redirect: all-am
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) AM_INSTALL_PROGRAM_FLAGS=-s install
+installdirs:
+
+
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -rm -f Makefile $(CONFIG_CLEAN_FILES)
+ -rm -f config.cache config.log stamp-h stamp-h[0-9]*
+
+maintainer-clean-generic:
+mostlyclean-am: mostlyclean-noinstLTLIBRARIES mostlyclean-compile \
+ mostlyclean-libtool mostlyclean-tags \
+ mostlyclean-generic
+
+mostlyclean: mostlyclean-am
+
+clean-am: clean-noinstLTLIBRARIES clean-compile clean-libtool \
+ clean-tags clean-generic mostlyclean-am
+
+clean: clean-am
+
+distclean-am: distclean-noinstLTLIBRARIES distclean-compile \
+ distclean-libtool distclean-tags distclean-generic \
+ clean-am
+ -rm -f libtool
+
+distclean: distclean-am
+
+maintainer-clean-am: maintainer-clean-noinstLTLIBRARIES \
+ maintainer-clean-compile maintainer-clean-libtool \
+ maintainer-clean-tags maintainer-clean-generic \
+ distclean-am
+ @echo "This command is intended for maintainers to use;"
+ @echo "it deletes files that may require special tools to rebuild."
+
+maintainer-clean: maintainer-clean-am
+
+.PHONY: mostlyclean-noinstLTLIBRARIES distclean-noinstLTLIBRARIES \
+clean-noinstLTLIBRARIES maintainer-clean-noinstLTLIBRARIES \
+mostlyclean-compile distclean-compile clean-compile \
+maintainer-clean-compile mostlyclean-libtool distclean-libtool \
+clean-libtool maintainer-clean-libtool tags mostlyclean-tags \
+distclean-tags clean-tags maintainer-clean-tags distdir info-am info \
+dvi-am dvi check check-am installcheck-am installcheck install-exec-am \
+install-exec install-data-am install-data install-am install \
+uninstall-am uninstall all-redirect all-am all installdirs \
+mostlyclean-generic distclean-generic clean-generic \
+maintainer-clean-generic clean mostlyclean distclean maintainer-clean
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
Added: trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgi.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgi.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgi.c Sat Jul 13 19:22:37 2002
@@ -0,0 +1,271 @@
+/*
+ cgi.c - Some simple routines for cgi programming
+ Copyright (c) 1996-8 Martin Schulze <joey at infodrom.north.de>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include "cgi.h"
+
+int cgiDebugLevel = 0;
+int cgiDebugStderr = 1;
+
+void cgiHeader ()
+{
+ printf ("Content-type: text/html\n\n");
+}
+
+void cgiDebug (int level, int where)
+{
+ if (level > 0)
+ cgiDebugLevel = level;
+ else
+ cgiDebugLevel = 0;
+ if (where)
+ cgiDebugStderr = 0;
+ else
+ cgiDebugStderr = 1;
+}
+
+char *cgiDecodeString (char *text)
+{
+ char *cp, *xp;
+
+ for (cp=text,xp=text; *cp; cp++) {
+ if (*cp == '%') {
+ if (strchr("0123456789ABCDEFabcdef", *(cp+1))
+ && strchr("0123456789ABCDEFabcdef", *(cp+2))) {
+ if (islower((unsigned int)*(cp+1)))
+ *(cp+1) = toupper((unsigned int)*(cp+1));
+ if (islower((unsigned int)*(cp+2)))
+ *(cp+2) = toupper((unsigned int)*(cp+2));
+ *(xp) = (*(cp+1) >= 'A' ? *(cp+1) - 'A' + 10 : *(cp+1) - '0' ) * 16
+ + (*(cp+2) >= 'A' ? *(cp+2) - 'A' + 10 : *(cp+2) - '0');
+ xp++;cp+=2;
+ }
+ } else {
+ *(xp++) = *cp;
+ }
+ }
+ memset(xp, 0, cp-xp);
+ return text;
+}
+
+/* cgiInit()
+ *
+ * Read from stdin if no string is provided via CGI. Variables that
+ * don't have a value associated with them don't get stored.
+ */
+s_cgi **cgiInit ()
+{
+ int length;
+ char *line = NULL;
+ int numargs;
+ char *cp, *ip, *esp, *sptr;
+ s_cgi **result;
+ int i, k;
+ char tmp[101];
+
+ cp = getenv("REQUEST_METHOD");
+ ip = getenv("CONTENT_LENGTH");
+
+ if (cp && !strcmp(cp, "POST")) {
+ if (ip) {
+ length = atoi(ip);
+ if ((line = (char *)malloc (length+2)) == NULL)
+ return NULL;
+ fgets(line, length+1, stdin);
+ } else
+ return NULL;
+ } else if (cp && !strcmp(cp, "GET")) {
+ esp = getenv("QUERY_STRING");
+ if (esp && strlen(esp)) {
+ if ((line = (char *)malloc (strlen(esp)+2)) == NULL)
+ return NULL;
+ sprintf (line, "%s", esp);
+ } else
+ return NULL;
+ } else {
+ length = 0;
+ printf ("(offline mode: enter name=value pairs on standard input)\n");
+ for (cp = fgets(tmp, 100, stdin); cp != NULL;
+ cp = fgets(tmp, 100, stdin) ) {
+ if (strlen(tmp)) {
+ length += strlen(tmp);
+ if ((ip = (char *)malloc ((length+1) * sizeof(char))) == NULL)
+ return NULL;
+ memset(ip,0, length);
+ if (line) {
+ if (line[strlen(line)-1] == '\n')
+ line[strlen(line)-1] = '&';
+ strcpy(ip, line);
+ }
+ ip = strcat(ip, tmp);
+ if (line)
+ free (line);
+ line = ip;
+ }
+ }
+ if (!line)
+ return NULL;
+ if (line[strlen(line)-1] == '\n')
+ line[strlen(line)-1] = '\0';
+ }
+
+ /*
+ * From now on all cgi variables are stored in the variable line
+ * and look like foo=bar&foobar=barfoo&foofoo=
+ */
+
+ if (cgiDebugLevel > 0) {
+ if (cgiDebugStderr) {
+ fprintf (stderr, "Received cgi input: %s\n", line);
+ } else {
+ printf ("<b>Received cgi input</b><br>\n<pre>\n--\n%s\n--\n</pre>\n\n", line);
+ }
+ }
+ for (cp=line; *cp; cp++) {
+ if (*cp == '+') {
+ *cp = ' ';
+ }
+ }
+ if (strlen(line)) {
+ for (numargs=1,cp=line; *cp; cp++) {
+ if (*cp == '&') numargs++;
+ }
+ } else {
+ numargs = 0;
+ }
+ if (cgiDebugLevel > 0) {
+ if (cgiDebugStderr) {
+ fprintf (stderr, "%d cgi variables found.\n", numargs);
+ } else {
+ printf ("%d cgi variables found.<br>\n", numargs);
+ }
+ }
+ if ((result = (s_cgi **)malloc((numargs+1) * sizeof(s_cgi *))) == NULL) {
+ return NULL;
+ }
+
+ memset (result, 0, (numargs+1) * sizeof(s_cgi *));
+
+ cp = line;
+ i=0;
+ while (*cp) {
+ if ((ip = (char *)strchr(cp, '&')) != NULL) {
+ *ip = '\0';
+ }else {
+ ip = cp + strlen(cp);
+ }
+
+ if ((esp=(char *)strchr(cp, '=')) == NULL) {
+ cp = ++ip;
+ continue;
+ }
+
+ if (!strlen(esp)) {
+ cp = ++ip;
+ continue;
+ }
+
+ if (i<numargs) {
+
+ for (k=0; k<i && (strncmp(result[k]->name,cp, esp-cp)); k++);
+ /* try to find out if there's already such a variable */
+ if (k == i) { /* No such variable yet */
+ if ((result[i] = (s_cgi *)malloc(sizeof(s_cgi))) == NULL)
+ return NULL;
+ if ((result[i]->name = (char *)malloc((esp-cp+1) * sizeof(char))) == NULL)
+ return NULL;
+ memset (result[i]->name, 0, esp-cp+1);
+ strncpy(result[i]->name, cp, esp-cp);
+ cp = ++esp;
+ if ((result[i]->value = (char *)malloc((ip-esp+1) * sizeof(char))) == NULL)
+ return NULL;
+ memset (result[i]->value, 0, ip-esp+1);
+ strncpy(result[i]->value, cp, ip-esp);
+ result[i]->value = cgiDecodeString(result[i]->value);
+ if (cgiDebugLevel) {
+ if (cgiDebugStderr)
+ fprintf (stderr, "%s: %s\n", result[i]->name, result[i]->value);
+ else
+ printf ("<h3>Variable %s</h3>\n<pre>\n%s\n</pre>\n\n", result[i]->name, result[i]->value);
+ }
+ i++;
+ } else { /* There is already such a name; assume a multiple field */
+ if ((sptr = (char *)malloc((strlen(result[k]->value)+(ip-esp)+2)* sizeof(char))) == NULL)
+ return NULL;
+ memset (sptr, 0, strlen(result[k]->value)+(ip-esp)+2);
+ sprintf (sptr, "%s\n", result[k]->value);
+ cp = ++esp;
+ strncat(sptr, cp, ip-esp);
+ free(result[k]->value);
+ result[k]->value = sptr;
+ }
+ }
+ cp = ++ip;
+ }
+ return result;
+}
+
+char *cgiGetValue(s_cgi **parms, const char *var)
+{
+ int i;
+
+ if (parms) {
+ for (i=0;parms[i]; i++) {
+ if (!strcmp(var,parms[i]->name)) {
+ if (cgiDebugLevel > 0) {
+ if (cgiDebugStderr) {
+ fprintf (stderr, "%s found as %s\n", var, parms[i]->value);
+ } else {
+ printf ("%s found as %s<br>\n", var, parms[i]->value);
+ }
+ }
+ return parms[i]->value;
+ }
+ }
+ }
+ if (cgiDebugLevel) {
+ if (cgiDebugStderr) {
+ fprintf (stderr, "%s not found\n", var);
+ } else {
+ printf ("%s not found<br>\n", var);
+ }
+ }
+ return NULL;
+}
+
+void cgiRedirect (const char *url)
+{
+ if (url && strlen(url)) {
+ printf ("Content-type: text/html\nContent-length: %d\n", 77+(strlen(url)*2));
+ printf ("Status: 302 Temporal Relocation\n");
+ printf ("Location: %s\n\n", url);
+ printf ("<html>\n<body>\nThe page has been moved to <a href=\"%s\">%s</a>\n</body>\n</html>\n", url, url);
+ }
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 8
+ * End:
+ */
Added: trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgitest.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgitest.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgitest.c Sat Jul 13 19:22:37 2002
@@ -0,0 +1,89 @@
+/*
+ cgitest.c - Testprogram for cgi.o
+ Copyright (c) 1998 Martin Schulze <joey at infodrom.north.de>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
+ */
+
+/*
+ * Compile with: cc -o cgitest cgitest.c -lcgi
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <cgi.h>
+
+s_cgi **cgi;
+
+void print_form()
+{
+ printf ("<h1>Test-Form</h1>\n");
+ printf ("<form action=\"/cgi-bin/cgitest/insertdata\" method=post>\n");
+ printf ("Input: <input name=string size=50>\n<br>");
+ printf ("<select name=select multiple>\n<option>Nr. 1\n<option>Nr. 2\n<option>Nr. 3\n<option>Nr. 4\n</select>\n");
+ printf ("Text: <textarea name=text cols=50>\n</textarea>\n");
+ printf ("<center><input type=submit value=Submit> ");
+ printf ("<input type=reset value=Reset></center>\n");
+ printf ("</form>\n");
+}
+
+void eval_cgi()
+{
+ printf ("<h1>Results</h1>\n\n");
+ printf ("<b>string</b>: %s<p>\n", cgiGetValue(cgi, "string"));
+ printf ("<b>text</b>: %s<p>\n", cgiGetValue(cgi, "text"));
+ printf ("<b>select</b>: %s<p>\n", cgiGetValue(cgi, "select"));
+}
+
+
+int main ()
+{
+ char *path_info = NULL;
+
+ cgiDebug(0, 0);
+ cgi = cgiInit();
+
+ path_info = getenv("PATH_INFO");
+ if (path_info) {
+ if (!strcmp(path_info, "/redirect")) {
+ cgiRedirect("http://www.infodrom.north.de/");
+ exit (0);
+ } else {
+ cgiHeader();
+ printf ("<html>\n<head><title>cgilib</title></title>\n\n<body>\n");
+ printf ("<h1>cgilib</h1>\n");
+ printf ("path_info: %s<br>\n", path_info);
+ if (!strcmp(path_info, "/insertdata")) {
+ eval_cgi();
+ } else
+ print_form();
+ }
+ } else {
+ cgiHeader();
+ printf ("<html>\n<head><title>cgilib</title></title>\n\n<body>\n");
+ printf ("<h1>cgilib</h1>\n");
+ print_form();
+ }
+
+ printf ("\n<hr>\n</body>\n</html>\n");
+ return 0;
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 8
+ * End:
+ */
Added: trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgiRedirect.3
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgiRedirect.3 (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgiRedirect.3 Sat Jul 13 19:22:38 2002
@@ -0,0 +1,45 @@
+.\" cgiRedirect - Redirect the browser somewhere else
+.\" Copyright (c) 1998 Martin Schulze <joey at infodrom.north.de>
+.\"
+.\" This program is free software; you can redistribute it and/or modify
+.\" it under the terms of the GNU General Public License as published by
+.\" the Free Software Foundation; either version 2 of the License, or
+.\" (at your option) any later version.
+.\"
+.\" This program is distributed in the hope that it will be useful,
+.\" but WITHOUT ANY WARRANTY; without even the implied warranty of
+.\" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+.\" GNU General Public License for more details.
+.\"
+.\" You should have received a copy of the GNU General Public License
+.\" along with this program; if not, write to the Free Software
+.\" Foundation, Inc.,59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+.\"
+.TH cgiRedirect 3 "17 February 1998" "Debian GNU/Linux" "Programmer's Manual"
+.SH NAME
+cgiRedirect \- Redirect the browser somewhere else
+.SH SYNOPSIS
+.nf
+.B #include <cgi.h>
+.sp
+.BI "void cgiRedirect(char *" url );
+.fi
+.SH DESCRIPTION
+The
+.B cgiRedirect
+routine redirects the browser to another
+.IR url .
+This mechanism may be useful to redirect invalid requests to some
+static pages describing the policy.
+.SH "RETURN VALUE"
+.BR cgiRedirect ()
+does not return a value.
+
+.SH "AUTHOR"
+This cgi library is written by Martin Schulze
+<joey at infodrom.north.de>. If you have additions or improvements
+please get in touch with him.
+
+.SH "SEE ALSO"
+.BR cgiInit (3),
+.BR cgiHeader (3).
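
A minimal sketch of the use case mentioned above, redirecting unexpected requests to a static policy page. The URLs and the PATH_INFO value are placeholders, not anything defined by cgilib.

    #include <stdlib.h>
    #include <string.h>
    #include <cgi.h>

    int main(void)
    {
        char *path = getenv("PATH_INFO");

        /* Send one known location to its new home, everything else to a
         * static page describing the policy; cgiRedirect() emits the full
         * Status/Location header and a small HTML body by itself. */
        if (path != NULL && strcmp(path, "/old-page") == 0)
            cgiRedirect("http://www.example.org/new-page.html");
        else
            cgiRedirect("http://www.example.org/policy.html");
        return 0;
    }
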
Added: trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgi.5
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgi.5 (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgi.5 Sat Jul 13 19:22:38 2002
@@ -0,0 +1,115 @@
+.\" cgi - Common Gateway Interface
+.\" Copyright (c) 1998 Martin Schulze <joey at infodrom.north.de>
+.\"
+.\" This program is free software; you can redistribute it and/or modify
+.\" it under the terms of the GNU General Public License as published by
+.\" the Free Software Foundation; either version 2 of the License, or
+.\" (at your option) any later version.
+.\"
+.\" This program is distributed in the hope that it will be useful,
+.\" but WITHOUT ANY WARRANTY; without even the implied warranty of
+.\" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+.\" GNU General Public License for more details.
+.\"
+.\" You should have received a copy of the GNU General Public License
+.\" along with this program; if not, write to the Free Software
+.\" Foundation, Inc.,59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+.\"
+.TH cgi 5 "15 February 1998" "Debian GNU/Linux" "Programmer's Manual"
+.SH NAME
+cgi \- Common Gateway Interface
+
+.SH DESCRIPTION
+The Common Gateway Interface is a way to create dynamic web pages.
+It defines the rules for interaction between a program and the web server
+while the server talks to the client. The environment variables that the
+server passes to the program are described below.
+
+.SH "ENVIRONMENT"
+Normally the webserver sets several environment variables to give some
+information to the CGI program so it can determine various stuff.
+.TP
+.B AUTH_TYPE
+This reflects the authentication method used to validate a user.
+.TP
+.B CONTENT_LENGTH
+The length of the data in bytes passed to the CGI program through
+standard input. This is used by the POST method.
+.TP
+.B CONTENT_TYPE
+The MIME type of the query data, such as "text/html", optional.
+.TP
+.B DOCUMENT_ROOT
+This reflects the document root directory of the webserver.
+.TP
+.B GATEWAY_INTERFACE
+Reflects the version of the Common Gateway Interface that the server
+is using.
+.TP
+.B HTTP_ACCEPT
+A comma-separated list of MIME types that the client is willing to
+accept.
+.TP
+.B HTTP_FROM
+The email address of the user issuing the information request. This
+is not supported by most browsers.
+.TP
+.B HTTP_REFERER
+Reflects the URL from which this cgi program was accessed.
+.TP
+.B HTTP_USER_AGENT
+The name, version and libraries of the browser making the request.
+This information can be used to determine if the browser is capable of
+graphics and is able to display frames and tables.
+.TP
+.B PATH_INFO
+This shows extra information that was passed to the cgi program via
+the command line. Normally it's empty or non-existent.
+.TP
+.B PATH_TRANSLATED
+The translated path on the local filesystem.
+.TP
+.B QUERY_STRING
+This variable refers to additional arguments that were appended to the
+cgi program - normally with the '?' sign.
+.TP
+.B REMOTE_ADDR
+This refers to the host from which the information request was issued,
+as an IP number.
+.TP
+.B REMOTE_HOST
+This refers to the host from which the information request was issued.
+.TP
+.B REMOTE_USER
+The authenticated name of the user.
+.TP
+.B REQUEST_METHOD
+This refers to the method with which the information request was
+issued. Normally this is either GET or POST.
+.TP
+.B SCRIPT_NAME
+The virtual name of the script being executed.
+.TP
+.B SERVER_NAME
+The server's hostname or IP number. This may be used to determine the
+correct paths or resulting HTML code for cgi programs that are used on
+the same machine for several servers.
+.TP
+.B SERVER_PROTOCOL
+This is the name and version of the information protocol the request
+came in with. Normally this is "HTTP/1.0" or "HTTP/1.1".
+.TP
+.B SERVER_PORT
+This refers to the TCP/IP port on which the webserver is running.
+.TP
+.B SERVER_SOFTWARE
+This reflects the name and revision of the webserver software.
+
+.SH "AUTHOR"
+This cgi library is written by Martin Schulze
+<joey at infodrom.north.de>. If you have additions or improvements
+please get in touch with him.
+
+.SH "SEE ALSO"
+.BR cgiDebug (3),
+.BR cgiHeader (3),
+.BR cgiGetValue (3).
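
For readers new to the interface, a few lines of C are enough to look at these variables directly. The sketch below uses only getenv() and the variables documented above; any of them may be unset, so always test for NULL.

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const char *method = getenv("REQUEST_METHOD");
        const char *query  = getenv("QUERY_STRING");
        const char *length = getenv("CONTENT_LENGTH");

        /* The server only sets these for a real CGI request; when run
         * from a shell most of them will be missing. */
        printf("Content-type: text/plain\n\n");
        printf("REQUEST_METHOD: %s\n", method ? method : "(not set)");
        printf("QUERY_STRING:   %s\n", query  ? query  : "(not set)");
        printf("CONTENT_LENGTH: %s\n", length ? length : "(not set)");
        return 0;
    }
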
Added: trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgilib.dsw
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgilib.dsw (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgilib.dsw Sat Jul 13 19:22:38 2002
@@ -0,0 +1,29 @@
+Microsoft Developer Studio Workspace File, Format Version 6.00
+# WARNING: DO NOT EDIT OR DELETE THIS WORKSPACE FILE!
+
+###############################################################################
+
+Project: "cgilib"=".\cgilib.dsp" - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+}}}
+
+###############################################################################
+
+Global:
+
+Package=<5>
+{{{
+}}}
+
+Package=<3>
+{{{
+}}}
+
+###############################################################################
+
Added: trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgiInit.3
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgiInit.3 (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgiInit.3 Sat Jul 13 19:22:38 2002
@@ -0,0 +1,49 @@
+.\" cgiInit - Initializes cgi library
+.\" Copyright (c) 1998 Martin Schulze <joey at infodrom.north.de>
+.\"
+.\" This program is free software; you can redistribute it and/or modify
+.\" it under the terms of the GNU General Public License as published by
+.\" the Free Software Foundation; either version 2 of the License, or
+.\" (at your option) any later version.
+.\"
+.\" This program is distributed in the hope that it will be useful,
+.\" but WITHOUT ANY WARRANTY; without even the implied warranty of
+.\" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+.\" GNU General Public License for more details.
+.\"
+.\" You should have received a copy of the GNU General Public License
+.\" along with this program; if not, write to the Free Software
+.\" Foundation, Inc.,59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+.\"
+.TH cgiInit 3 "14 February 1998" "Debian GNU/Linux" "Programmer's Manual"
+.SH NAME
+cgiInit \- Initializes cgi library
+.SH SYNOPSIS
+.nf
+.B #include <cgi.h>
+.sp
+.B s_cgi **cgiInit();
+.fi
+.SH DESCRIPTION
+This routine initializes the cgi routines. Mainly it reads in and
+decodes cgi data for later processing. If the program is not called
+via the cgi interface, the user is prompted to type in cgi variable
+bindings via stdin - just like CGI.pm does.
+
+This routine is normally the first or second one called. Only
+.BR cgiDebug ()
+may be called before it. If debugging is enabled this routine produces
+some additional output.
+.SH "RETURN VALUE"
+On success a set of cgi variable bindings is returned that is needed
+for later processing. If an error occurs NULL is returned.
+
+.SH "AUTHOR"
+This cgi library is written by Martin Schulze
+<joey at infodrom.north.de>. If you have additions or improvements
+please get in touch with him.
+
+.SH "SEE ALSO"
+.BR cgiDebug (3),
+.BR cgiHeader (3),
+.BR cgiGetValue (3).
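
As a small supplement to the description above: cgi.c in this same revision builds the result as a NULL-terminated array of s_cgi pointers, so a caller can both check for failure and walk every submitted variable, roughly like this sketch.

    #include <stdio.h>
    #include <cgi.h>

    int main(void)
    {
        s_cgi **cgi = cgiInit();   /* may prompt on stdin in offline mode */
        int i;

        cgiHeader();
        if (cgi == NULL) {
            printf("no CGI input could be read\n");
            return 0;
        }
        /* The array is NULL-terminated, the same convention that
         * cgiGetValue() relies on internally. */
        for (i = 0; cgi[i] != NULL; i++)
            printf("%s = %s<br>\n", cgi[i]->name, cgi[i]->value);
        return 0;
    }
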
Added: trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgi.h
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgi.h (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgi.h Sat Jul 13 19:22:38 2002
@@ -0,0 +1,59 @@
+/*
+ cgi.h - Some simple routines for cgi programming
+ Copyright (c) 1996-8 Martin Schulze <joey at infodrom.north.de>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
+ */
+
+#ifndef _CGI_H_
+#define _CGI_H_
+
+typedef struct cgi_s {
+ char *name,
+ *value;
+} s_cgi;
+
+/* cgiHeader
+ *
+ * Prints a valid CGI header (Content-type ...) to stdout
+ */
+void cgiHeader ();
+
+/* cgiDebug
+ *
+ * Set/unsets debugging
+ */
+void cgiDebug (int level, int where);
+
+/* cgiInit
+ *
+ * Reads in variables passed via POST, GET, or stdin
+ */
+s_cgi **cgiInit ();
+
+/* cgiGetValue
+ *
+ * Returns the value of the specified variable or NULL if it's empty
+ * or doesn't exist.
+ */
+char *cgiGetValue(s_cgi **parms, const char *var);
+
+/* cgiRedirect
+ *
+ * Provides a valid redirect for web pages.
+ */
+void cgiRedirect (const char *url);
+
+#endif /* _CGI_H_ */
Added: trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/Makefile.am
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/Makefile.am (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/Makefile.am Sat Jul 13 19:22:38 2002
@@ -0,0 +1,14 @@
+## Process this file with automake to produce Makefile.in
+
+#AUTOMAKE_OPTIONS = foreign
+
+# our local rules for autoconf (automake and libtool)
+#ACLOCAL_M4 = $(top_srcdir)/config/aclocal.m4
+#AUTOHEADER = @AUTOHEADER@ --localdir=config
+
+noinst_LTLIBRARIES = librrd_cgi.la
+
+librrd_cgi_la_SOURCES = cgi.c cgi.h
+
+EXTRA_DIST= *.[1-9] *.dsp *.dsw readme cgitest.c jumpto.c
+
Added: trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgiDebug.3
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgiDebug.3 (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgiDebug.3 Sat Jul 13 19:22:39 2002
@@ -0,0 +1,48 @@
+.\" cgiDebug - Set the debug level for cgi programming
+.\" Copyright (c) 1998 Martin Schulze <joey at infodrom.north.de>
+.\"
+.\" This program is free software; you can redistribute it and/or modify
+.\" it under the terms of the GNU General Public License as published by
+.\" the Free Software Foundation; either version 2 of the License, or
+.\" (at your option) any later version.
+.\"
+.\" This program is distributed in the hope that it will be useful,
+.\" but WITHOUT ANY WARRANTY; without even the implied warranty of
+.\" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+.\" GNU General Public License for more details.
+.\"
+.\" You should have received a copy of the GNU General Public License
+.\" along with this program; if not, write to the Free Software
+.\" Foundation, Inc.,59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+.\"
+.TH cgiDebug 3 "14 February 1998" "Debian GNU/Linux" "Programmer's Manual"
+.SH NAME
+cgiDebug \- Set the debug level for cgi programming
+.SH SYNOPSIS
+.nf
+.B #include <cgi.h>
+.sp
+.BI "void cgiDebug(int " level ", int " where );
+.fi
+.SH DESCRIPTION
+This routine controls debugging for the cgi library. At the moment
+only level 0 (default, no debugging) and 1 (debugging enabled) are
+supported. The second argument
+.I where
+specifies if debug output should be written to stdout using HTML (1)
+or to stderr as plain text (0, default).
+
+This should be the first cgi library routine called.
+.SH "RETURN VALUE"
+.BR cgiDebug ()
+does not return a value.
+
+.SH "AUTHOR"
+This cgi library is written by Martin Schulze
+<joey at infodrom.north.de>. If you have additions or improvements
+please get in touch with him.
+
+.SH "SEE ALSO"
+.BR cgiInit (3),
+.BR cgiHeader (3),
+.BR cgiGetValue (3).
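
Since the page stresses that cgiDebug() has to come first, here is the expected calling order as a sketch: level 1, plain-text output on stderr, which normally ends up in the web server's error log.

    #include <stdio.h>
    #include <cgi.h>

    int main(void)
    {
        s_cgi **cgi;

        cgiDebug(1, 0);        /* must precede cgiInit(); 1 = on, 0 = stderr */
        cgi = cgiInit();       /* now reports what it received on stderr */
        cgiHeader();
        printf("cgiInit %s; see the server error log for the debug trace\n",
               cgi ? "succeeded" : "failed");
        return 0;
    }
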
Added: trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgiHeader.3
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgiHeader.3 (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgiHeader.3 Sat Jul 13 19:22:39 2002
@@ -0,0 +1,42 @@
+.\" cgiHeader - Write the cgi-bin header
+.\" Copyright (c) 1998 Martin Schulze <joey at infodrom.north.de>
+.\"
+.\" This program is free software; you can redistribute it and/or modify
+.\" it under the terms of the GNU General Public License as published by
+.\" the Free Software Foundation; either version 2 of the License, or
+.\" (at your option) any later version.
+.\"
+.\" This program is distributed in the hope that it will be useful,
+.\" but WITHOUT ANY WARRANTY; without even the implied warranty of
+.\" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+.\" GNU General Public License for more details.
+.\"
+.\" You should have received a copy of the GNU General Public License
+.\" along with this program; if not, write to the Free Software
+.\" Foundation, Inc.,59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+.\"
+.TH cgiInit 3 "14 February 1998" "Debian GNU/Linux" "Programmer's Manual"
+.SH NAME
+cgiHeader \- Write the cgi-bin header
+.SH SYNOPSYS
+.nf
+.B #include <cgi.h>
+.sp
+.B void cgiHeader();
+.fi
+.SH DESCRIPTION
+This routine just prints out the Content-type: line to make the web
+server happy.
+
+.SH "RETURN VALUE"
+This routine does not return a value.
+
+.SH "AUTHOR"
+This cgi library is written by Martin Schulze
+<joey at infodrom.north.de>. If you have additions or improvements
+please get in touch with him.
+
+.SH "SEE ALSO"
+.BR cgiDebug (3),
+.BR cgiInit (3),
+.BR cgiGetValue (3).
Added: trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/readme
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/readme (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/readme Sat Jul 13 19:22:39 2002
@@ -0,0 +1,24 @@
+To use this library simply include the cgi.h include file with the
+following command in your C programs:
+
+#include <cgi.h>
+
+Then add libcgi.a to the link step, either by modifying the LDFLAGS in
+your makefiles or by adding `-lcgi' to the appropriate command line.
+
+
+HTTP Return Codes
+
+ http://www.w3.org/Protocols/HTTP/HTRESP.html
+
+HTTP Headers
+
+ http://www.w3.org/Protocols/HTTP/Object_Headers.html
+
+
+If you have additions, questions or improvements please don't hesitate
+to contact me.
+
+Infodrom Oldenburg
+Martin Schulze
+joey at infodrom.north.de
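
To make the two steps above concrete, a smallest-possible program might look like the sketch below. The include/library paths and the field name "name" are assumptions; adjust them to wherever cgi.h and libcgi.a actually live.

    /* hello-cgi.c -- build roughly as:
     *   cc -I/path/to/cgilib -o hello-cgi hello-cgi.c -L/path/to/cgilib -lcgi
     * (paths are placeholders for wherever cgi.h and libcgi.a were put) */
    #include <stdio.h>
    #include <cgi.h>

    int main(void)
    {
        s_cgi **cgi = cgiInit();
        char *name = cgi ? cgiGetValue(cgi, "name") : NULL;

        cgiHeader();
        printf("<html><body>Hello, %s!</body></html>\n", name ? name : "world");
        return 0;
    }
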
Added: trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgilib.dsp
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgilib.dsp (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/cgilib-0.4/cgilib.dsp Sat Jul 13 19:22:39 2002
@@ -0,0 +1,98 @@
+# Microsoft Developer Studio Project File - Name="cgilib" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Static Library" 0x0104
+
+CFG=cgilib - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "cgilib.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "cgilib.mak" CFG="cgilib - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "cgilib - Win32 Release" (based on "Win32 (x86) Static Library")
+!MESSAGE "cgilib - Win32 Debug" (based on "Win32 (x86) Static Library")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "cgilib - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_MBCS" /D "_LIB" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "..\\" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /D "_CTYPE_DISABLE_MACROS" /FD /c
+# SUBTRACT CPP /YX
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LIB32=link.exe -lib
+# ADD BASE LIB32 /nologo
+# ADD LIB32 /nologo
+
+!ELSEIF "$(CFG)" == "cgilib - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /ZI /Od /D "WIN32" /D "_DEBUG" /D "_MBCS" /D "_LIB" /YX /FD /GZ /c
+# ADD CPP /nologo /MTd /W3 /Gm /GX /ZI /Od /I "..\\" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /D "_CTYPE_DISABLE_MACROS" /FR /FD /GZ /c
+# SUBTRACT CPP /YX
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LIB32=link.exe -lib
+# ADD BASE LIB32 /nologo
+# ADD LIB32 /nologo
+
+!ENDIF
+
+# Begin Target
+
+# Name "cgilib - Win32 Release"
+# Name "cgilib - Win32 Debug"
+# Begin Group "Source Files"
+
+# PROP Default_Filter "cpp;c;cxx;rc;def;r;odl;idl;hpj;bat"
+# Begin Source File
+
+SOURCE=.\cgi.c
+# End Source File
+# End Group
+# Begin Group "Header Files"
+
+# PROP Default_Filter "h;hpp;hxx;hm;inl"
+# End Group
+# End Target
+# End Project
Modified: trunk/orca/packages/rrdtool-1.0.7.2/CONTRIBUTORS
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/CONTRIBUTORS (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/CONTRIBUTORS Sat Jul 13 19:22:39 2002
@@ -9,38 +9,47 @@
Debugging and code contributions
+ Andreas Kroomaa <andre at ml.ee>
+ Andrew Turner <turner at mint.net> (LAST consolidator)
+ Blair Zajac <bzajac at geostaff.com>
+ Dan Dunn <dandunn at computer.org>
+ Hermann Hueni <hueni at glue.ch> (SunOS porting)
Jeff R. Allen <jeff.allen at acm.org> (autoconfigure, portability)
+ Jost.Krieger <Jost.Krieger at ruhr-uni-bochum.de>
+ Otmar Lendl <O.Lendl at Austria.EU.net> (lots of bugfixes)
Philippe.Simonet <Philippe.Simonet at swisscom.com> (NT porting)
- Wrolf Courtney <wrolf at concentric.net> (HP-UX)
- Dan Dunn <dandunn at computer.org>
- Russ Wright <wright at LBL.Gov>
+ Russ Wright <rwwright at home.com>
Simon Leinen <simon at switch.ch>
- Jost.Krieger <Jost.Krieger at ruhr-uni-bochum.de>
- Blair Zajac <bzajac at geostaff.com>
- Andrew Turner <turner at mint.net> (LAST and TOTAL consolidators)
- Andreas Kroomaa <andre at ml.ee>
- Oleg Cherevko <olwi at icyb.kiev.ua>
- Otmar Lendl <O.Lendl at Austria.EU.net> (core dump fix)
- Tom Crawley <Tom.Crawley at hi.riotinto.com.au> (GCC&HP configuration)
- Jeremy Fischer <jeremy at pobox.com> (Makefile changes & RPM builds)
+ Wrolf Courtney <wrolf at concentric.net> (HP-UX)
Alan Lichty <alan_lichty at eli.net>
+ Alex van den Bogaerdt <alex at ergens.op.het.net> (rrd_resize.c and more)
+ Jeremy Fischer <jeremy at pobox.com> (Makefile changes & RPM builds)
+ Oleg Cherevko <olwi at icyb.kiev.ua>
Steen Linden <Steen.Linden at ebone.net>
- Alex van den Bogaerdt <alex at ergens.op.het.net> (rrd_resize.c)
+ Tom Crawley <Tom.Crawley at hi.riotinto.com.au> (GCC&HP configuration)
Documentation
- Russ Wright <rwwright at home.com>
- Wrolf Courtney <wrolf at concentric.net>
+ Alan Lichty <alan_lichty at eli.net>
+ Alex van den Bogaerdt <alex at ergens.op.het.net>
Amos Shapira <amos at gezernet.co.il>
Kai Siering <kai.siering at mediaways.net>
- Alex van den Bogaerdt <alex at ergens.op.het.net>
- Alan Lichty <alan_lichty at eli.net>
+ Russ Wright <rwwright at home.com>
+ Wrolf Courtney <wrolf at concentric.net>
+ Tobias Weingartner <weingart at cs.ualberta.ca>
+
+Internet Resources
+ LAN Services AG (www.lan.ch) for the http://rrdtool.eu.org domain reflector
Further I would like to note, that rrdtool would not exist without
the following free software products:
Perl by Larry Wall
gd library by Thomas Boutell
+ gifcode from David Koblas
+ libpng by Glenn Randers-Pehrson / Andreas Eric Dilger / Guy Eric Schalnat
+ cgilib by Martin Schulze
+ zlib by Jean-loup Gailly and Mark Adler
SNMP Perl-Module by Simon Leinen
and last but not least
@@ -51,4 +60,7 @@
at the Swiss Federal Institute of Technology who allow me to
use their network resources to publish rrdtool ...
+During Summer 1999 CAIDA (www.caida.org) supported me in working full
+time on RRDtool ... A big thank you to them as well.
+
Tobias Oetiker <oetiker at ee.ethz.ch>
Modified: trunk/orca/packages/rrdtool-1.0.7.2/README
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/README (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/README Sat Jul 13 19:22:39 2002
@@ -5,8 +5,8 @@
ranging from the temperature in your office to the number of octets which
have passed through the FDDI interface of your router. But it is not so
trivial to store this data in a efficient and systematic manner. This is
-where rrdtool kicks in. It lets you log and analyze the data you gather from
-all kinds of data- sources (DS). The data analysis part of rrdtool is based
+where RRDtool kicks in. It lets you log and analyze the data you gather from
+all kinds of data-sources (DS). The data analysis part of RRDtool is based
on the ability to quickly generate graphical representations of the data
values collected over a definable time period.
@@ -14,47 +14,75 @@
To compile:
-----------
- sh configure
- make
+ sh configure
+ make <------ GNU make
+ make install <------ GNU make
- (Please report configure/compilation problems to
- the developers list, so I can make it better. -jra)
-
- Please note that you should get perl-5.004 for things to work
+This will configure, compile and install RRDtool in /usr/local/rrdtool-VERSION.
+If you prefer to install RRDtool in some other place, use
- Win32 users:
- ------------
+ sh configure --prefix=/some/other/RRDtool-dir
- Win32 things are controlled by the #define WIN32.
- If your compiler doesn't define that, you should add it to
- the compile line. -jeff (jeff.allen at acm.org)
+If you prefer to live with shared libraries, make sure you add the --enable-shared
+option to your configure call.
+ sh configure --enable-shared
-Perl bindings:
---------------
-The package contains two sets of Perl bindings.
+The configure script will try to find your perl installation (5.004 preferred).
+If it does not find it, you can still build RRDtool but no perl modules will be
+generated.
+
+By default the perl modules will be installed under the RRDtool install directory.
+This will require you to use a 'use lib' statement in your RRDtool perl programs.
+If you do not care what happens to your site-perl directory, you can also use
-a) perl-piped which uses teh rrdtool through a set of pipes
-b) perl-shared which builds a shared library from rrdtool
+ make site-perl-install
-Both bindings get built by the toplevel make. Additionally
-you might want to:
+will install the perl modules wherever you keep your local perl modules.
+Doing this relieves you from using 'use lib' in your scripts.
-make test
-make install
+Getting Started:
+----------------
-There is an example script in each of the directories. The example
-scrips work even when you don't intall the perl extensions.
+Either after compiling or after installing you can try the example
+RRDtool applications in the examples directory.
To learn:
---------
Read the documentation in the doc directory. Start of with
-rrdtool. All documents are available as html and as ascii text.
+RRDtool. All documents are available as html and as ASCII text.
-If you want to know about the format of the logfiles check
+If you are looking for a slower-paced introduction, make sure to read
+Alex van den Bogaerdt's rrdintro, which is also available from the doc
+directory.
+
+If you want to know about the format of the log files check
src/rrd_format.h there are a lot of comments in there ...
+How to make Tobi happy:
+-----------------------
+
+If you want to show your appreciation for RRDtool you could make me happy
+by going to www.cdnow.com/gift/oetiker at ee.ethz.ch and getting me a CD from
+my CD wish list ...
+
+
+How to keep in touch:
+---------------------
+
+There are 3 Mailing lists for RRDtool:
+
+rrd-announce LOW volume RRDtool Announcements List (Only Stable Releases)
+rrd-users For discussion amongst people who use RRDtool in their applications
+rrd-developers For people who actually HACK RRDtool code
+
+To subscribe to <MAILGLIST> send a message with the subject 'subscribe'
+to <MAILGLIST>-request at list.ee.ethz.ch.
+
+Note that postings to rrd-announce will always be cross-posted
+to rrd-users and rrd-developers as well.
+
To Contribute:
--------------
@@ -62,16 +90,11 @@
send complete patches. A complete patch patches the CODE as well
as the CHANGES, CONTRIBUTORS and the POD files.
-If you want to keep in touch, make sure you subscribe to the
-mrtg-developers mailinglist by sending a message with the subject
-'subscribe' to mrtg-developers-request at list.ee.ethz.ch
+Use GNU diff --unified --recursive olddir newdir to build your patches.
The latest Version:
-------------------
-
Is available from http://ee-staff.ethz.ch/~oetiker/webtools/rrdtool/
Tobias Oetiker <oetiker at ee.ethz.ch>
-
-
Added: trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/infcodes.h
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/infcodes.h (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/infcodes.h Sat Jul 13 19:22:39 2002
@@ -0,0 +1,27 @@
+/* infcodes.h -- header to use infcodes.c
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+struct inflate_codes_state;
+typedef struct inflate_codes_state FAR inflate_codes_statef;
+
+extern inflate_codes_statef *inflate_codes_new OF((
+ uInt, uInt,
+ inflate_huft *, inflate_huft *,
+ z_streamp ));
+
+extern int inflate_codes OF((
+ inflate_blocks_statef *,
+ z_streamp ,
+ int));
+
+extern void inflate_codes_free OF((
+ inflate_codes_statef *,
+ z_streamp ));
+
Added: trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/configure
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/configure (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/configure Sat Jul 13 19:22:40 2002
@@ -0,0 +1,212 @@
+#!/bin/sh
+# configure script for zlib. This script is needed only if
+# you wish to build a shared library and your system supports them,
+# or if you need a special compiler, flags or install directory.
+# Otherwise, you can just use directly "make test; make install"
+#
+# To create a shared library, use "configure --shared"; by default a static
+# library is created. If the primitive shared library support provided here
+# does not work, use ftp://prep.ai.mit.edu/pub/gnu/libtool-*.tar.gz
+#
+# To impose specific compiler or flags or install directory, use for example:
+# prefix=$HOME CC=cc CFLAGS="-O4" ./configure
+# or for csh/tcsh users:
+# (setenv prefix $HOME; setenv CC cc; setenv CFLAGS "-O4"; ./configure)
+# LDSHARED is the command to be used to create a shared library
+
+# Incorrect settings of CC or CFLAGS may prevent creating a shared library.
+# If you have problems, try without defining CC and CFLAGS before reporting
+# an error.
+
+LIBS=libz.a
+SHAREDLIB=libz.so
+VER=`sed -n -e '/VERSION "/s/.*"\(.*\)".*/\1/p' < zlib.h`
+AR=${AR-"ar rc"}
+RANLIB=${RANLIB-"ranlib"}
+prefix=${prefix-/usr/local}
+exec_prefix=${exec_prefix-'${prefix}'}
+libdir=${libdir-'${exec_prefix}/lib'}
+includedir=${includedir-'${prefix}/include'}
+shared_ext='.so'
+shared=0
+gcc=0
+old_cc="$CC"
+old_cflags="$CFLAGS"
+
+while test $# -ge 1
+do
+case "$1" in
+ -h* | --h*)
+ echo 'usage:'
+ echo ' configure [--shared] [--prefix=PREFIX] [--exec_prefix=EXPREFIX]'
+ echo ' [--libdir=LIBDIR] [--includedir=INCLUDEDIR]'
+ exit 0;;
+ -p*=* | --p*=*) prefix=`echo $1 | sed 's/[-a-z_]*=//'`; shift;;
+ -e*=* | --e*=*) exec_prefix=`echo $1 | sed 's/[-a-z_]*=//'`; shift;;
+ -l*=* | --libdir=*) libdir=`echo $1 | sed 's/[-a-z_]*=//'`; shift;;
+ -i*=* | --includedir=*) includedir=`echo $1 | sed 's/[-a-z_]*=//'`;shift;;
+ -p* | --p*) prefix="$2"; shift; shift;;
+ -e* | --e*) exec_prefix="$2"; shift; shift;;
+ -l* | --l*) libdir="$2"; shift; shift;;
+ -i* | --i*) includedir="$2"; shift; shift;;
+ -s* | --s*) shared=1; shift;;
+ esac
+done
+
+test=ztest$$
+cat > $test.c <<EOF
+extern int getchar();
+int hello() {return getchar();}
+EOF
+
+test -z "$CC" && echo Checking for gcc...
+cc=${CC-gcc}
+cflags=${CFLAGS-"-O3"}
+# to force the asm version use: CFLAGS="-O3 -DASMV" ./configure
+case "$cc" in
+ *gcc*) gcc=1;;
+esac
+
+if test "$gcc" -eq 1 && ($cc -c $cflags $test.c) 2>/dev/null; then
+ CC="$cc"
+ SFLAGS=${CFLAGS-"-fPIC -O3"}
+ CFLAGS="$cflags"
+ case `(uname -s || echo unknown) 2>/dev/null` in
+ Linux | linux) LDSHARED=${LDSHARED-"gcc -shared -Wl,-soname,libz.so.1"};;
+ *) LDSHARED=${LDSHARED-"gcc -shared"};;
+ esac
+else
+ # find system name and corresponding cc options
+ CC=${CC-cc}
+ case `(uname -sr || echo unknown) 2>/dev/null` in
+ HP-UX*) SFLAGS=${CFLAGS-"-O +z"}
+ CFLAGS=${CFLAGS-"-O"}
+# LDSHARED=${LDSHARED-"ld -b +vnocompatwarnings"}
+ LDSHARED=${LDSHARED-"ld -b"}
+ shared_ext='.sl'
+ SHAREDLIB='libz.sl';;
+ IRIX*) SFLAGS=${CFLAGS-"-ansi -O2 -rpath ."}
+ CFLAGS=${CFLAGS-"-ansi -O2"}
+ LDSHARED=${LDSHARED-"cc -shared"};;
+ OSF1\ V4*) SFLAGS=${CFLAGS-"-O -std1"}
+ CFLAGS=${CFLAGS-"-O -std1"}
+ LDSHARED=${LDSHARED-"cc -shared -Wl,-soname,$SHAREDLIB -Wl,-msym -Wl,-rpath,$(libdir) -Wl,-set_version,${VER}:1.0"};;
+ OSF1*) SFLAGS=${CFLAGS-"-O -std1"}
+ CFLAGS=${CFLAGS-"-O -std1"}
+ LDSHARED=${LDSHARED-"cc -shared"};;
+ QNX*) SFLAGS=${CFLAGS-"-4 -O"}
+ CFLAGS=${CFLAGS-"-4 -O"}
+ LDSHARED=${LDSHARED-"cc"}
+ RANLIB=${RANLIB-"true"}
+ AR="cc -A";;
+ SCO_SV\ 3.2*) SFLAGS=${CFLAGS-"-O3 -dy -KPIC "}
+ CFLAGS=${CFLAGS-"-O3"}
+ LDSHARED=${LDSHARED-"cc -dy -KPIC -G"};;
+ SunOS\ 5*) SFLAGS=${CFLAGS-"-fast -xcg89 -KPIC -R."}
+ CFLAGS=${CFLAGS-"-fast -xcg89"}
+ LDSHARED=${LDSHARED-"cc -G"};;
+ SunOS\ 4*) SFLAGS=${CFLAGS-"-O2 -PIC"}
+ CFLAGS=${CFLAGS-"-O2"}
+ LDSHARED=${LDSHARED-"ld"};;
+ UNIX_System_V\ 4.2.0)
+ SFLAGS=${CFLAGS-"-KPIC -O"}
+ CFLAGS=${CFLAGS-"-O"}
+ LDSHARED=${LDSHARED-"cc -G"};;
+ UNIX_SV\ 4.2MP)
+ SFLAGS=${CFLAGS-"-Kconform_pic -O"}
+ CFLAGS=${CFLAGS-"-O"}
+ LDSHARED=${LDSHARED-"cc -G"};;
+ # send working options for other systems to support@gzip.org
+ *) SFLAGS=${CFLAGS-"-O"}
+ CFLAGS=${CFLAGS-"-O"}
+ LDSHARED=${LDSHARED-"cc -shared"};;
+ esac
+fi
+
+if test $shared -eq 1; then
+ echo Checking for shared library support...
+ # we must test in two steps (cc then ld), required at least on SunOS 4.x
+ if test "`($CC -c $SFLAGS $test.c) 2>&1`" = "" &&
+ test "`($LDSHARED -o $test$shared_ext $test.o) 2>&1`" = ""; then
+ CFLAGS="$SFLAGS"
+ LIBS="$SHAREDLIB.$VER"
+ echo Building shared library $SHAREDLIB.$VER with $CC.
+ elif test -z "$old_cc" -a -z "$old_cflags"; then
+  echo No shared library support.
+ shared=0;
+ else
+  echo 'No shared library support; try without defining CC and CFLAGS'
+ shared=0;
+ fi
+fi
+if test $shared -eq 0; then
+ LDSHARED="$CC"
+ echo Building static library $LIBS version $VER with $CC.
+fi
+
+cat > $test.c <<EOF
+#include <unistd.h>
+int main() { return 0; }
+EOF
+if test "`($CC -c $CFLAGS $test.c) 2>&1`" = ""; then
+ CFLAGS="$CFLAGS -DHAVE_UNISTD_H"
+ echo "Checking for unistd.h... Yes."
+else
+ echo "Checking for unistd.h... No."
+fi
+
+cat > $test.c <<EOF
+#include <errno.h>
+int main() { return 0; }
+EOF
+if test "`($CC -c $CFLAGS $test.c) 2>&1`" = ""; then
+ echo "Checking for errno.h... Yes."
+else
+ echo "Checking for errno.h... No."
+ CFLAGS="$CFLAGS -DNO_ERRNO_H"
+fi
+
+cat > $test.c <<EOF
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+caddr_t hello() {
+ return mmap((caddr_t)0, (off_t)0, PROT_READ, MAP_SHARED, 0, (off_t)0);
+}
+EOF
+if test "`($CC -c $CFLAGS $test.c) 2>&1`" = ""; then
+ CFLAGS="$CFLAGS -DUSE_MMAP"
+ echo Checking for mmap support... Yes.
+else
+ echo Checking for mmap support... No.
+fi
+
+CPP=${CPP-"$CC -E"}
+case $CFLAGS in
+ *ASMV*)
+ if test "`nm $test.o | grep _hello`" = ""; then
+ CPP="$CPP -DNO_UNDERLINE"
+ echo Checking for underline in external names... No.
+ else
+ echo Checking for underline in external names... Yes.
+ fi;;
+esac
+
+rm -f $test.[co] $test$shared_ext
+
+# update Makefile
+sed < Makefile.in "
+/^CC *=/s%=.*%=$CC%
+/^CFLAGS *=/s%=.*%=$CFLAGS%
+/^CPP *=/s%=.*%=$CPP%
+/^LDSHARED *=/s%=.*%=$LDSHARED%
+/^LIBS *=/s%=.*%=$LIBS%
+/^SHAREDLIB *=/s%=.*%=$SHAREDLIB%
+/^AR *=/s%=.*%=$AR%
+/^RANLIB *=/s%=.*%=$RANLIB%
+/^VER *=/s%=.*%=$VER%
+/^prefix *=/s%=.*%=$prefix%
+/^exec_prefix *=/s%=.*%=$exec_prefix%
+/^libdir *=/s%=.*%=$libdir%
+/^includedir *=/s%=.*%=$includedir%
+" > Makefile
Added: trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/README.rrdtool
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/README.rrdtool (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/README.rrdtool Sat Jul 13 19:22:40 2002
@@ -0,0 +1,2 @@
+this version of zlib has been included with rrdtool ...
+the contrib directory has been removed ...
Added: trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/Makefile.in
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/Makefile.in (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/Makefile.in Sat Jul 13 19:22:40 2002
@@ -0,0 +1,324 @@
+# Makefile.in generated automatically by automake 1.4 from Makefile.am
+
+# Copyright (C) 1994, 1995-8, 1999 Free Software Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+#AUTOMAKE_OPTIONS = foreign
+
+# read local config files
+#ACLOCAL_M4 = $(top_srcdir)/config/aclocal.m4
+#AUTOHEADER = @AUTOHEADER@ --localdir=$(top_srcdir)/config
+
+
+SHELL = @SHELL@
+
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+VPATH = @srcdir@
+prefix = @prefix@
+exec_prefix = @exec_prefix@
+
+bindir = @bindir@
+sbindir = @sbindir@
+libexecdir = @libexecdir@
+datadir = @datadir@
+sysconfdir = @sysconfdir@
+sharedstatedir = @sharedstatedir@
+localstatedir = @localstatedir@
+libdir = @libdir@
+infodir = @infodir@
+mandir = @mandir@
+includedir = @includedir@
+oldincludedir = /usr/include
+
+DESTDIR =
+
+pkgdatadir = $(datadir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+
+top_builddir = ..
+
+ACLOCAL = @ACLOCAL@
+AUTOCONF = @AUTOCONF@
+AUTOMAKE = @AUTOMAKE@
+AUTOHEADER = @AUTOHEADER@
+
+INSTALL = @INSTALL@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@ $(AM_INSTALL_PROGRAM_FLAGS)
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+transform = @program_transform_name@
+
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_alias = @build_alias@
+build_triplet = @build@
+host_alias = @host_alias@
+host_triplet = @host@
+target_alias = @target_alias@
+target_triplet = @target@
+CC = @CC@
+CFLAGS = @CFLAGS@
+CGI_LIB_DIR = @CGI_LIB_DIR@
+COMP_PERL = @COMP_PERL@
+CPP = @CPP@
+GD_LIB_DIR = @GD_LIB_DIR@
+LIBTOOL = @LIBTOOL@
+PERL = @PERL@
+PNG_LIB_DIR = @PNG_LIB_DIR@
+RANLIB = @RANLIB@
+ZLIB_LIB_DIR = @ZLIB_LIB_DIR@
+
+EXTRA_DIST = ChangeLog FAQ INDEX README README.rrdtool algorithm.txt zlib.dsp zlib.dsw zlib.3
+
+noinst_LTLIBRARIES = librrd_z.la
+
+librrd_z_la_SOURCES = adler32.c compress.c crc32.c deflate.c gzio.c infblock.c infcodes.c inffast.c inflate.c inftrees.c infutil.c trees.c uncompr.c zutil.c deflate.h infcodes.h inffixed.h infutil.h zconf.h zutil.h infblock.h inffast.h inftrees.h trees.h zlib.h
+
+mkinstalldirs = $(SHELL) $(top_srcdir)/config/mkinstalldirs
+CONFIG_HEADER = ../config/config.h
+CONFIG_CLEAN_FILES =
+LTLIBRARIES = $(noinst_LTLIBRARIES)
+
+
+DEFS = @DEFS@ -I. -I$(srcdir) -I../config
+CPPFLAGS = @CPPFLAGS@
+LDFLAGS = @LDFLAGS@
+LIBS = @LIBS@
+librrd_z_la_LDFLAGS =
+librrd_z_la_LIBADD =
+librrd_z_la_OBJECTS = adler32.lo compress.lo crc32.lo deflate.lo \
+gzio.lo infblock.lo infcodes.lo inffast.lo inflate.lo inftrees.lo \
+infutil.lo trees.lo uncompr.lo zutil.lo
+COMPILE = $(CC) $(DEFS) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --mode=compile $(CC) $(DEFS) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(LDFLAGS) -o $@
+DIST_COMMON = README ChangeLog Makefile.am Makefile.in configure
+
+
+PACKAGE = @PACKAGE@
+VERSION = @VERSION@
+
+DISTFILES = $(DIST_COMMON) $(SOURCES) $(HEADERS) $(TEXINFOS) $(EXTRA_DIST)
+
+TAR = tar
+GZIP_ENV = --best
+SOURCES = $(librrd_z_la_SOURCES)
+OBJECTS = $(librrd_z_la_OBJECTS)
+
+all: all-redirect
+.SUFFIXES:
+.SUFFIXES: .S .c .lo .o .s
+$(srcdir)/Makefile.in: Makefile.am $(top_srcdir)/configure.in $(ACLOCAL_M4)
+ cd $(top_srcdir) && $(AUTOMAKE) --gnu --include-deps zlib-1.1.3/Makefile
+
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ cd $(top_builddir) \
+ && CONFIG_FILES=$(subdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
+
+
+mostlyclean-noinstLTLIBRARIES:
+
+clean-noinstLTLIBRARIES:
+ -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
+
+distclean-noinstLTLIBRARIES:
+
+maintainer-clean-noinstLTLIBRARIES:
+
+.c.o:
+ $(COMPILE) -c $<
+
+.s.o:
+ $(COMPILE) -c $<
+
+.S.o:
+ $(COMPILE) -c $<
+
+mostlyclean-compile:
+ -rm -f *.o core *.core
+
+clean-compile:
+
+distclean-compile:
+ -rm -f *.tab.c
+
+maintainer-clean-compile:
+
+.c.lo:
+ $(LIBTOOL) --mode=compile $(COMPILE) -c $<
+
+.s.lo:
+ $(LIBTOOL) --mode=compile $(COMPILE) -c $<
+
+.S.lo:
+ $(LIBTOOL) --mode=compile $(COMPILE) -c $<
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+
+distclean-libtool:
+
+maintainer-clean-libtool:
+
+librrd_z.la: $(librrd_z_la_OBJECTS) $(librrd_z_la_DEPENDENCIES)
+ $(LINK) $(librrd_z_la_LDFLAGS) $(librrd_z_la_OBJECTS) $(librrd_z_la_LIBADD) $(LIBS)
+
+tags: TAGS
+
+ID: $(HEADERS) $(SOURCES) $(LISP)
+ list='$(SOURCES) $(HEADERS)'; \
+ unique=`for i in $$list; do echo $$i; done | \
+ awk ' { files[$$0] = 1; } \
+ END { for (i in files) print i; }'`; \
+ here=`pwd` && cd $(srcdir) \
+ && mkid -f$$here/ID $$unique $(LISP)
+
+TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) $(LISP)
+ tags=; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS)'; \
+ unique=`for i in $$list; do echo $$i; done | \
+ awk ' { files[$$0] = 1; } \
+ END { for (i in files) print i; }'`; \
+ test -z "$(ETAGS_ARGS)$$unique$(LISP)$$tags" \
+ || (cd $(srcdir) && etags $(ETAGS_ARGS) $$tags $$unique $(LISP) -o $$here/TAGS)
+
+mostlyclean-tags:
+
+clean-tags:
+
+distclean-tags:
+ -rm -f TAGS ID
+
+maintainer-clean-tags:
+
+distdir = $(top_builddir)/$(PACKAGE)-$(VERSION)/$(subdir)
+
+subdir = zlib-1.1.3
+
+distdir: $(DISTFILES)
+ @for file in $(DISTFILES); do \
+ d=$(srcdir); \
+ if test -d $$d/$$file; then \
+	    cp -pr $$d/$$file $(distdir)/$$file; \
+ else \
+ test -f $(distdir)/$$file \
+ || ln $$d/$$file $(distdir)/$$file 2> /dev/null \
+ || cp -p $$d/$$file $(distdir)/$$file || :; \
+ fi; \
+ done
+adler32.lo adler32.o : adler32.c zlib.h zconf.h
+compress.lo compress.o : compress.c zlib.h zconf.h
+crc32.lo crc32.o : crc32.c zlib.h zconf.h
+deflate.lo deflate.o : deflate.c deflate.h zutil.h zlib.h zconf.h
+gzio.lo gzio.o : gzio.c zutil.h zlib.h zconf.h
+infblock.lo infblock.o : infblock.c zutil.h zlib.h zconf.h infblock.h \
+ inftrees.h infcodes.h infutil.h
+infcodes.lo infcodes.o : infcodes.c zutil.h zlib.h zconf.h inftrees.h \
+ infblock.h infcodes.h infutil.h inffast.h
+inffast.lo inffast.o : inffast.c zutil.h zlib.h zconf.h inftrees.h \
+ infblock.h infcodes.h infutil.h inffast.h
+inflate.lo inflate.o : inflate.c zutil.h zlib.h zconf.h infblock.h
+inftrees.lo inftrees.o : inftrees.c zutil.h zlib.h zconf.h inftrees.h \
+ inffixed.h
+infutil.lo infutil.o : infutil.c zutil.h zlib.h zconf.h infblock.h \
+ inftrees.h infcodes.h infutil.h
+trees.lo trees.o : trees.c deflate.h zutil.h zlib.h zconf.h trees.h
+uncompr.lo uncompr.o : uncompr.c zlib.h zconf.h
+zutil.lo zutil.o : zutil.c zutil.h zlib.h zconf.h
+
+info-am:
+info: info-am
+dvi-am:
+dvi: dvi-am
+check-am: all-am
+check: check-am
+installcheck-am:
+installcheck: installcheck-am
+install-exec-am:
+install-exec: install-exec-am
+
+install-data-am:
+install-data: install-data-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+install: install-am
+uninstall-am:
+uninstall: uninstall-am
+all-am: Makefile $(LTLIBRARIES)
+all-redirect: all-am
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) AM_INSTALL_PROGRAM_FLAGS=-s install
+installdirs:
+
+
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -rm -f Makefile $(CONFIG_CLEAN_FILES)
+ -rm -f config.cache config.log stamp-h stamp-h[0-9]*
+
+maintainer-clean-generic:
+mostlyclean-am: mostlyclean-noinstLTLIBRARIES mostlyclean-compile \
+ mostlyclean-libtool mostlyclean-tags \
+ mostlyclean-generic
+
+mostlyclean: mostlyclean-am
+
+clean-am: clean-noinstLTLIBRARIES clean-compile clean-libtool \
+ clean-tags clean-generic mostlyclean-am
+
+clean: clean-am
+
+distclean-am: distclean-noinstLTLIBRARIES distclean-compile \
+ distclean-libtool distclean-tags distclean-generic \
+ clean-am
+ -rm -f libtool
+
+distclean: distclean-am
+
+maintainer-clean-am: maintainer-clean-noinstLTLIBRARIES \
+ maintainer-clean-compile maintainer-clean-libtool \
+ maintainer-clean-tags maintainer-clean-generic \
+ distclean-am
+ @echo "This command is intended for maintainers to use;"
+ @echo "it deletes files that may require special tools to rebuild."
+
+maintainer-clean: maintainer-clean-am
+
+.PHONY: mostlyclean-noinstLTLIBRARIES distclean-noinstLTLIBRARIES \
+clean-noinstLTLIBRARIES maintainer-clean-noinstLTLIBRARIES \
+mostlyclean-compile distclean-compile clean-compile \
+maintainer-clean-compile mostlyclean-libtool distclean-libtool \
+clean-libtool maintainer-clean-libtool tags mostlyclean-tags \
+distclean-tags clean-tags maintainer-clean-tags distdir info-am info \
+dvi-am dvi check check-am installcheck-am installcheck install-exec-am \
+install-exec install-data-am install-data install-am install \
+uninstall-am uninstall all-redirect all-am all installdirs \
+mostlyclean-generic distclean-generic clean-generic \
+maintainer-clean-generic clean mostlyclean distclean maintainer-clean
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
Added: trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/inffixed.h
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/inffixed.h (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/inffixed.h Sat Jul 13 19:22:40 2002
@@ -0,0 +1,151 @@
+/* inffixed.h -- table for decoding fixed codes
+ * Generated automatically by the maketree.c program
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+local uInt fixed_bl = 9;
+local uInt fixed_bd = 5;
+local inflate_huft fixed_tl[] = {
+ {{{96,7}},256}, {{{0,8}},80}, {{{0,8}},16}, {{{84,8}},115},
+ {{{82,7}},31}, {{{0,8}},112}, {{{0,8}},48}, {{{0,9}},192},
+ {{{80,7}},10}, {{{0,8}},96}, {{{0,8}},32}, {{{0,9}},160},
+ {{{0,8}},0}, {{{0,8}},128}, {{{0,8}},64}, {{{0,9}},224},
+ {{{80,7}},6}, {{{0,8}},88}, {{{0,8}},24}, {{{0,9}},144},
+ {{{83,7}},59}, {{{0,8}},120}, {{{0,8}},56}, {{{0,9}},208},
+ {{{81,7}},17}, {{{0,8}},104}, {{{0,8}},40}, {{{0,9}},176},
+ {{{0,8}},8}, {{{0,8}},136}, {{{0,8}},72}, {{{0,9}},240},
+ {{{80,7}},4}, {{{0,8}},84}, {{{0,8}},20}, {{{85,8}},227},
+ {{{83,7}},43}, {{{0,8}},116}, {{{0,8}},52}, {{{0,9}},200},
+ {{{81,7}},13}, {{{0,8}},100}, {{{0,8}},36}, {{{0,9}},168},
+ {{{0,8}},4}, {{{0,8}},132}, {{{0,8}},68}, {{{0,9}},232},
+ {{{80,7}},8}, {{{0,8}},92}, {{{0,8}},28}, {{{0,9}},152},
+ {{{84,7}},83}, {{{0,8}},124}, {{{0,8}},60}, {{{0,9}},216},
+ {{{82,7}},23}, {{{0,8}},108}, {{{0,8}},44}, {{{0,9}},184},
+ {{{0,8}},12}, {{{0,8}},140}, {{{0,8}},76}, {{{0,9}},248},
+ {{{80,7}},3}, {{{0,8}},82}, {{{0,8}},18}, {{{85,8}},163},
+ {{{83,7}},35}, {{{0,8}},114}, {{{0,8}},50}, {{{0,9}},196},
+ {{{81,7}},11}, {{{0,8}},98}, {{{0,8}},34}, {{{0,9}},164},
+ {{{0,8}},2}, {{{0,8}},130}, {{{0,8}},66}, {{{0,9}},228},
+ {{{80,7}},7}, {{{0,8}},90}, {{{0,8}},26}, {{{0,9}},148},
+ {{{84,7}},67}, {{{0,8}},122}, {{{0,8}},58}, {{{0,9}},212},
+ {{{82,7}},19}, {{{0,8}},106}, {{{0,8}},42}, {{{0,9}},180},
+ {{{0,8}},10}, {{{0,8}},138}, {{{0,8}},74}, {{{0,9}},244},
+ {{{80,7}},5}, {{{0,8}},86}, {{{0,8}},22}, {{{192,8}},0},
+ {{{83,7}},51}, {{{0,8}},118}, {{{0,8}},54}, {{{0,9}},204},
+ {{{81,7}},15}, {{{0,8}},102}, {{{0,8}},38}, {{{0,9}},172},
+ {{{0,8}},6}, {{{0,8}},134}, {{{0,8}},70}, {{{0,9}},236},
+ {{{80,7}},9}, {{{0,8}},94}, {{{0,8}},30}, {{{0,9}},156},
+ {{{84,7}},99}, {{{0,8}},126}, {{{0,8}},62}, {{{0,9}},220},
+ {{{82,7}},27}, {{{0,8}},110}, {{{0,8}},46}, {{{0,9}},188},
+ {{{0,8}},14}, {{{0,8}},142}, {{{0,8}},78}, {{{0,9}},252},
+ {{{96,7}},256}, {{{0,8}},81}, {{{0,8}},17}, {{{85,8}},131},
+ {{{82,7}},31}, {{{0,8}},113}, {{{0,8}},49}, {{{0,9}},194},
+ {{{80,7}},10}, {{{0,8}},97}, {{{0,8}},33}, {{{0,9}},162},
+ {{{0,8}},1}, {{{0,8}},129}, {{{0,8}},65}, {{{0,9}},226},
+ {{{80,7}},6}, {{{0,8}},89}, {{{0,8}},25}, {{{0,9}},146},
+ {{{83,7}},59}, {{{0,8}},121}, {{{0,8}},57}, {{{0,9}},210},
+ {{{81,7}},17}, {{{0,8}},105}, {{{0,8}},41}, {{{0,9}},178},
+ {{{0,8}},9}, {{{0,8}},137}, {{{0,8}},73}, {{{0,9}},242},
+ {{{80,7}},4}, {{{0,8}},85}, {{{0,8}},21}, {{{80,8}},258},
+ {{{83,7}},43}, {{{0,8}},117}, {{{0,8}},53}, {{{0,9}},202},
+ {{{81,7}},13}, {{{0,8}},101}, {{{0,8}},37}, {{{0,9}},170},
+ {{{0,8}},5}, {{{0,8}},133}, {{{0,8}},69}, {{{0,9}},234},
+ {{{80,7}},8}, {{{0,8}},93}, {{{0,8}},29}, {{{0,9}},154},
+ {{{84,7}},83}, {{{0,8}},125}, {{{0,8}},61}, {{{0,9}},218},
+ {{{82,7}},23}, {{{0,8}},109}, {{{0,8}},45}, {{{0,9}},186},
+ {{{0,8}},13}, {{{0,8}},141}, {{{0,8}},77}, {{{0,9}},250},
+ {{{80,7}},3}, {{{0,8}},83}, {{{0,8}},19}, {{{85,8}},195},
+ {{{83,7}},35}, {{{0,8}},115}, {{{0,8}},51}, {{{0,9}},198},
+ {{{81,7}},11}, {{{0,8}},99}, {{{0,8}},35}, {{{0,9}},166},
+ {{{0,8}},3}, {{{0,8}},131}, {{{0,8}},67}, {{{0,9}},230},
+ {{{80,7}},7}, {{{0,8}},91}, {{{0,8}},27}, {{{0,9}},150},
+ {{{84,7}},67}, {{{0,8}},123}, {{{0,8}},59}, {{{0,9}},214},
+ {{{82,7}},19}, {{{0,8}},107}, {{{0,8}},43}, {{{0,9}},182},
+ {{{0,8}},11}, {{{0,8}},139}, {{{0,8}},75}, {{{0,9}},246},
+ {{{80,7}},5}, {{{0,8}},87}, {{{0,8}},23}, {{{192,8}},0},
+ {{{83,7}},51}, {{{0,8}},119}, {{{0,8}},55}, {{{0,9}},206},
+ {{{81,7}},15}, {{{0,8}},103}, {{{0,8}},39}, {{{0,9}},174},
+ {{{0,8}},7}, {{{0,8}},135}, {{{0,8}},71}, {{{0,9}},238},
+ {{{80,7}},9}, {{{0,8}},95}, {{{0,8}},31}, {{{0,9}},158},
+ {{{84,7}},99}, {{{0,8}},127}, {{{0,8}},63}, {{{0,9}},222},
+ {{{82,7}},27}, {{{0,8}},111}, {{{0,8}},47}, {{{0,9}},190},
+ {{{0,8}},15}, {{{0,8}},143}, {{{0,8}},79}, {{{0,9}},254},
+ {{{96,7}},256}, {{{0,8}},80}, {{{0,8}},16}, {{{84,8}},115},
+ {{{82,7}},31}, {{{0,8}},112}, {{{0,8}},48}, {{{0,9}},193},
+ {{{80,7}},10}, {{{0,8}},96}, {{{0,8}},32}, {{{0,9}},161},
+ {{{0,8}},0}, {{{0,8}},128}, {{{0,8}},64}, {{{0,9}},225},
+ {{{80,7}},6}, {{{0,8}},88}, {{{0,8}},24}, {{{0,9}},145},
+ {{{83,7}},59}, {{{0,8}},120}, {{{0,8}},56}, {{{0,9}},209},
+ {{{81,7}},17}, {{{0,8}},104}, {{{0,8}},40}, {{{0,9}},177},
+ {{{0,8}},8}, {{{0,8}},136}, {{{0,8}},72}, {{{0,9}},241},
+ {{{80,7}},4}, {{{0,8}},84}, {{{0,8}},20}, {{{85,8}},227},
+ {{{83,7}},43}, {{{0,8}},116}, {{{0,8}},52}, {{{0,9}},201},
+ {{{81,7}},13}, {{{0,8}},100}, {{{0,8}},36}, {{{0,9}},169},
+ {{{0,8}},4}, {{{0,8}},132}, {{{0,8}},68}, {{{0,9}},233},
+ {{{80,7}},8}, {{{0,8}},92}, {{{0,8}},28}, {{{0,9}},153},
+ {{{84,7}},83}, {{{0,8}},124}, {{{0,8}},60}, {{{0,9}},217},
+ {{{82,7}},23}, {{{0,8}},108}, {{{0,8}},44}, {{{0,9}},185},
+ {{{0,8}},12}, {{{0,8}},140}, {{{0,8}},76}, {{{0,9}},249},
+ {{{80,7}},3}, {{{0,8}},82}, {{{0,8}},18}, {{{85,8}},163},
+ {{{83,7}},35}, {{{0,8}},114}, {{{0,8}},50}, {{{0,9}},197},
+ {{{81,7}},11}, {{{0,8}},98}, {{{0,8}},34}, {{{0,9}},165},
+ {{{0,8}},2}, {{{0,8}},130}, {{{0,8}},66}, {{{0,9}},229},
+ {{{80,7}},7}, {{{0,8}},90}, {{{0,8}},26}, {{{0,9}},149},
+ {{{84,7}},67}, {{{0,8}},122}, {{{0,8}},58}, {{{0,9}},213},
+ {{{82,7}},19}, {{{0,8}},106}, {{{0,8}},42}, {{{0,9}},181},
+ {{{0,8}},10}, {{{0,8}},138}, {{{0,8}},74}, {{{0,9}},245},
+ {{{80,7}},5}, {{{0,8}},86}, {{{0,8}},22}, {{{192,8}},0},
+ {{{83,7}},51}, {{{0,8}},118}, {{{0,8}},54}, {{{0,9}},205},
+ {{{81,7}},15}, {{{0,8}},102}, {{{0,8}},38}, {{{0,9}},173},
+ {{{0,8}},6}, {{{0,8}},134}, {{{0,8}},70}, {{{0,9}},237},
+ {{{80,7}},9}, {{{0,8}},94}, {{{0,8}},30}, {{{0,9}},157},
+ {{{84,7}},99}, {{{0,8}},126}, {{{0,8}},62}, {{{0,9}},221},
+ {{{82,7}},27}, {{{0,8}},110}, {{{0,8}},46}, {{{0,9}},189},
+ {{{0,8}},14}, {{{0,8}},142}, {{{0,8}},78}, {{{0,9}},253},
+ {{{96,7}},256}, {{{0,8}},81}, {{{0,8}},17}, {{{85,8}},131},
+ {{{82,7}},31}, {{{0,8}},113}, {{{0,8}},49}, {{{0,9}},195},
+ {{{80,7}},10}, {{{0,8}},97}, {{{0,8}},33}, {{{0,9}},163},
+ {{{0,8}},1}, {{{0,8}},129}, {{{0,8}},65}, {{{0,9}},227},
+ {{{80,7}},6}, {{{0,8}},89}, {{{0,8}},25}, {{{0,9}},147},
+ {{{83,7}},59}, {{{0,8}},121}, {{{0,8}},57}, {{{0,9}},211},
+ {{{81,7}},17}, {{{0,8}},105}, {{{0,8}},41}, {{{0,9}},179},
+ {{{0,8}},9}, {{{0,8}},137}, {{{0,8}},73}, {{{0,9}},243},
+ {{{80,7}},4}, {{{0,8}},85}, {{{0,8}},21}, {{{80,8}},258},
+ {{{83,7}},43}, {{{0,8}},117}, {{{0,8}},53}, {{{0,9}},203},
+ {{{81,7}},13}, {{{0,8}},101}, {{{0,8}},37}, {{{0,9}},171},
+ {{{0,8}},5}, {{{0,8}},133}, {{{0,8}},69}, {{{0,9}},235},
+ {{{80,7}},8}, {{{0,8}},93}, {{{0,8}},29}, {{{0,9}},155},
+ {{{84,7}},83}, {{{0,8}},125}, {{{0,8}},61}, {{{0,9}},219},
+ {{{82,7}},23}, {{{0,8}},109}, {{{0,8}},45}, {{{0,9}},187},
+ {{{0,8}},13}, {{{0,8}},141}, {{{0,8}},77}, {{{0,9}},251},
+ {{{80,7}},3}, {{{0,8}},83}, {{{0,8}},19}, {{{85,8}},195},
+ {{{83,7}},35}, {{{0,8}},115}, {{{0,8}},51}, {{{0,9}},199},
+ {{{81,7}},11}, {{{0,8}},99}, {{{0,8}},35}, {{{0,9}},167},
+ {{{0,8}},3}, {{{0,8}},131}, {{{0,8}},67}, {{{0,9}},231},
+ {{{80,7}},7}, {{{0,8}},91}, {{{0,8}},27}, {{{0,9}},151},
+ {{{84,7}},67}, {{{0,8}},123}, {{{0,8}},59}, {{{0,9}},215},
+ {{{82,7}},19}, {{{0,8}},107}, {{{0,8}},43}, {{{0,9}},183},
+ {{{0,8}},11}, {{{0,8}},139}, {{{0,8}},75}, {{{0,9}},247},
+ {{{80,7}},5}, {{{0,8}},87}, {{{0,8}},23}, {{{192,8}},0},
+ {{{83,7}},51}, {{{0,8}},119}, {{{0,8}},55}, {{{0,9}},207},
+ {{{81,7}},15}, {{{0,8}},103}, {{{0,8}},39}, {{{0,9}},175},
+ {{{0,8}},7}, {{{0,8}},135}, {{{0,8}},71}, {{{0,9}},239},
+ {{{80,7}},9}, {{{0,8}},95}, {{{0,8}},31}, {{{0,9}},159},
+ {{{84,7}},99}, {{{0,8}},127}, {{{0,8}},63}, {{{0,9}},223},
+ {{{82,7}},27}, {{{0,8}},111}, {{{0,8}},47}, {{{0,9}},191},
+ {{{0,8}},15}, {{{0,8}},143}, {{{0,8}},79}, {{{0,9}},255}
+ };
+local inflate_huft fixed_td[] = {
+ {{{80,5}},1}, {{{87,5}},257}, {{{83,5}},17}, {{{91,5}},4097},
+ {{{81,5}},5}, {{{89,5}},1025}, {{{85,5}},65}, {{{93,5}},16385},
+ {{{80,5}},3}, {{{88,5}},513}, {{{84,5}},33}, {{{92,5}},8193},
+ {{{82,5}},9}, {{{90,5}},2049}, {{{86,5}},129}, {{{192,5}},24577},
+ {{{80,5}},2}, {{{87,5}},385}, {{{83,5}},25}, {{{91,5}},6145},
+ {{{81,5}},7}, {{{89,5}},1537}, {{{85,5}},97}, {{{93,5}},24577},
+ {{{80,5}},4}, {{{88,5}},769}, {{{84,5}},49}, {{{92,5}},12289},
+ {{{82,5}},13}, {{{90,5}},3073}, {{{86,5}},193}, {{{192,5}},24577}
+ };
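
The fixed_tl and fixed_td tables above are the pre-built decoding tables for the fixed Huffman codes of RFC 1951: literal/length symbols 0-143 use 8-bit codes, 144-255 use 9 bits, 256-279 use 7 bits, 280-287 use 8 bits, and every fixed distance code is 5 bits wide, which is why fixed_bl is 9 and fixed_bd is 5. The sketch below only reproduces the code-length arrays that a maketree.c-style generator starts from; the inflate_huft layout itself is specific to this inflate implementation.

    /* Fixed Huffman code lengths from RFC 1951, section 3.2.6. */
    static void fixed_code_lengths(unsigned char litlen[288], unsigned char dist[30])
    {
        int i;
        for (i = 0; i < 144; i++) litlen[i] = 8;   /* literals 0..143         */
        for (; i < 256; i++) litlen[i] = 9;        /* literals 144..255       */
        for (; i < 280; i++) litlen[i] = 7;        /* 256 (end of block)..279 */
        for (; i < 288; i++) litlen[i] = 8;        /* 280..287                */
        for (i = 0; i < 30; i++) dist[i] = 5;      /* all distance codes      */
    }
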
Added: trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/deflate.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/deflate.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/deflate.c Sat Jul 13 19:22:40 2002
@@ -0,0 +1,1350 @@
+/* deflate.c -- compress data using the deflation algorithm
+ * Copyright (C) 1995-1998 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/*
+ * ALGORITHM
+ *
+ * The "deflation" process depends on being able to identify portions
+ * of the input text which are identical to earlier input (within a
+ * sliding window trailing behind the input currently being processed).
+ *
+ * The most straightforward technique turns out to be the fastest for
+ * most input files: try all possible matches and select the longest.
+ * The key feature of this algorithm is that insertions into the string
+ * dictionary are very simple and thus fast, and deletions are avoided
+ * completely. Insertions are performed at each input character, whereas
+ * string matches are performed only when the previous match ends. So it
+ * is preferable to spend more time in matches to allow very fast string
+ * insertions and avoid deletions. The matching algorithm for small
+ * strings is inspired from that of Rabin & Karp. A brute force approach
+ * is used to find longer strings when a small match has been found.
+ * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
+ * (by Leonid Broukhis).
+ * A previous version of this file used a more sophisticated algorithm
+ * (by Fiala and Greene) which is guaranteed to run in linear amortized
+ * time, but has a larger average cost, uses more memory and is patented.
+ * However the F&G algorithm may be faster for some highly redundant
+ * files if the parameter max_chain_length (described below) is too large.
+ *
+ * ACKNOWLEDGEMENTS
+ *
+ * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
+ * I found it in 'freeze' written by Leonid Broukhis.
+ * Thanks to many people for bug reports and testing.
+ *
+ * REFERENCES
+ *
+ * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification".
+ * Available in ftp://ds.internic.net/rfc/rfc1951.txt
+ *
+ * A description of the Rabin and Karp algorithm is given in the book
+ * "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
+ *
+ * Fiala,E.R., and Greene,D.H.
+ *    Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-505
+ *
+ */
+
+/* @(#) $Id$ */
+
+#include "deflate.h"
+
+const char deflate_copyright[] =
+ " deflate 1.1.3 Copyright 1995-1998 Jean-loup Gailly ";
+/*
+ If you use the zlib library in a product, an acknowledgment is welcome
+ in the documentation of your product. If for some reason you cannot
+ include such an acknowledgment, I would appreciate that you keep this
+ copyright string in the executable of your product.
+ */
+
+/* ===========================================================================
+ * Function prototypes.
+ */
+typedef enum {
+ need_more, /* block not completed, need more input or more output */
+ block_done, /* block flush performed */
+ finish_started, /* finish started, need only more output at next deflate */
+ finish_done /* finish done, accept no more input or output */
+} block_state;
+
+typedef block_state (*compress_func) OF((deflate_state *s, int flush));
+/* Compression function. Returns the block state after the call. */
+
+local void fill_window OF((deflate_state *s));
+local block_state deflate_stored OF((deflate_state *s, int flush));
+local block_state deflate_fast OF((deflate_state *s, int flush));
+local block_state deflate_slow OF((deflate_state *s, int flush));
+local void lm_init OF((deflate_state *s));
+local void putShortMSB OF((deflate_state *s, uInt b));
+local void flush_pending OF((z_streamp strm));
+local int read_buf OF((z_streamp strm, Bytef *buf, unsigned size));
+#ifdef ASMV
+ void match_init OF((void)); /* asm code initialization */
+ uInt longest_match OF((deflate_state *s, IPos cur_match));
+#else
+local uInt longest_match OF((deflate_state *s, IPos cur_match));
+#endif
+
+#ifdef DEBUG
+local void check_match OF((deflate_state *s, IPos start, IPos match,
+ int length));
+#endif
+
+/* ===========================================================================
+ * Local data
+ */
+
+#define NIL 0
+/* Tail of hash chains */
+
+#ifndef TOO_FAR
+# define TOO_FAR 4096
+#endif
+/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */
+
+#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
+/* Minimum amount of lookahead, except at the end of the input file.
+ * See deflate.c for comments about the MIN_MATCH+1.
+ */
+
+/* Values for max_lazy_match, good_match and max_chain_length, depending on
+ * the desired pack level (0..9). The values given below have been tuned to
+ * exclude worst case performance for pathological files. Better values may be
+ * found for specific files.
+ */
+typedef struct config_s {
+ ush good_length; /* reduce lazy search above this match length */
+ ush max_lazy; /* do not perform lazy search above this match length */
+ ush nice_length; /* quit search above this match length */
+ ush max_chain;
+ compress_func func;
+} config;
+
+local const config configuration_table[10] = {
+/* good lazy nice chain */
+/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
+/* 1 */ {4, 4, 8, 4, deflate_fast}, /* maximum speed, no lazy matches */
+/* 2 */ {4, 5, 16, 8, deflate_fast},
+/* 3 */ {4, 6, 32, 32, deflate_fast},
+
+/* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */
+/* 5 */ {8, 16, 32, 32, deflate_slow},
+/* 6 */ {8, 16, 128, 128, deflate_slow},
+/* 7 */ {8, 32, 128, 256, deflate_slow},
+/* 8 */ {32, 128, 258, 1024, deflate_slow},
+/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* maximum compression */
+
+/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
+ * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
+ * meaning.
+ */
+
+#define EQUAL 0
+/* result of memcmp for equal strings */
+
+struct static_tree_desc_s {int dummy;}; /* for buggy compilers */
+
+/* ===========================================================================
+ * Update a hash value with the given input byte
+ * IN assertion: all calls to UPDATE_HASH are made with consecutive
+ * input characters, so that a running hash key can be computed from the
+ * previous key instead of complete recalculation each time.
+ */
+#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
+
+
+/* ===========================================================================
+ * Insert string str in the dictionary and set match_head to the previous head
+ * of the hash chain (the most recent string with same hash key). Return
+ * the previous length of the hash chain.
+ * If this file is compiled with -DFASTEST, the compression level is forced
+ * to 1, and no hash chains are maintained.
+ * IN assertion: all calls to INSERT_STRING are made with consecutive
+ * input characters and the first MIN_MATCH bytes of str are valid
+ * (except for the last MIN_MATCH-1 bytes of the input file).
+ */
+#ifdef FASTEST
+#define INSERT_STRING(s, str, match_head) \
+ (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
+ match_head = s->head[s->ins_h], \
+ s->head[s->ins_h] = (Pos)(str))
+#else
+#define INSERT_STRING(s, str, match_head) \
+ (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
+ s->prev[(str) & s->w_mask] = match_head = s->head[s->ins_h], \
+ s->head[s->ins_h] = (Pos)(str))
+#endif
+
+/* ===========================================================================
+ * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
+ * prev[] will be initialized on the fly.
+ */
+#define CLEAR_HASH(s) \
+ s->head[s->hash_size-1] = NIL; \
+ zmemzero((Bytef *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head));
+
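
/* Illustrative sketch, not part of deflate.c: UPDATE_HASH above maintains a
 * running key by shifting the previous key and XORing in the new byte.  With
 * the default memLevel (hash_bits == 15, MIN_MATCH == 3) the shift works out
 * to 5, so the key ends up depending only on the last 3 input bytes, which is
 * what lets INSERT_STRING update the hash incrementally instead of rehashing.
 * The DEMO_* names below are assumptions chosen to match that default. */
#define DEMO_HASH_BITS  15
#define DEMO_MIN_MATCH  3
#define DEMO_HASH_SHIFT ((DEMO_HASH_BITS + DEMO_MIN_MATCH - 1) / DEMO_MIN_MATCH)
#define DEMO_HASH_MASK  ((1u << DEMO_HASH_BITS) - 1)

/* Feed one byte into the running key and return the updated key. */
unsigned rolling_hash(unsigned h, unsigned char c)
{
    return ((h << DEMO_HASH_SHIFT) ^ c) & DEMO_HASH_MASK;
}
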
+/* ========================================================================= */
+int ZEXPORT deflateInit_(strm, level, version, stream_size)
+ z_streamp strm;
+ int level;
+ const char *version;
+ int stream_size;
+{
+ return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL,
+ Z_DEFAULT_STRATEGY, version, stream_size);
+ /* To do: ignore strm->next_in if we use it as window */
+}
+
+/* ========================================================================= */
+int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
+ version, stream_size)
+ z_streamp strm;
+ int level;
+ int method;
+ int windowBits;
+ int memLevel;
+ int strategy;
+ const char *version;
+ int stream_size;
+{
+ deflate_state *s;
+ int noheader = 0;
+ static const char* my_version = ZLIB_VERSION;
+
+ ushf *overlay;
+ /* We overlay pending_buf and d_buf+l_buf. This works since the average
+ * output size for (length,distance) codes is <= 24 bits.
+ */
+
+ if (version == Z_NULL || version[0] != my_version[0] ||
+ stream_size != sizeof(z_stream)) {
+ return Z_VERSION_ERROR;
+ }
+ if (strm == Z_NULL) return Z_STREAM_ERROR;
+
+ strm->msg = Z_NULL;
+ if (strm->zalloc == Z_NULL) {
+ strm->zalloc = zcalloc;
+ strm->opaque = (voidpf)0;
+ }
+ if (strm->zfree == Z_NULL) strm->zfree = zcfree;
+
+ if (level == Z_DEFAULT_COMPRESSION) level = 6;
+#ifdef FASTEST
+ level = 1;
+#endif
+
+ if (windowBits < 0) { /* undocumented feature: suppress zlib header */
+ noheader = 1;
+ windowBits = -windowBits;
+ }
+ if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
+ windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||
+ strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
+ return Z_STREAM_ERROR;
+ }
+ s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state));
+ if (s == Z_NULL) return Z_MEM_ERROR;
+ strm->state = (struct internal_state FAR *)s;
+ s->strm = strm;
+
+ s->noheader = noheader;
+ s->w_bits = windowBits;
+ s->w_size = 1 << s->w_bits;
+ s->w_mask = s->w_size - 1;
+
+ s->hash_bits = memLevel + 7;
+ s->hash_size = 1 << s->hash_bits;
+ s->hash_mask = s->hash_size - 1;
+ s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
+
+ s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
+ s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos));
+ s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos));
+
+ s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
+
+ overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);
+ s->pending_buf = (uchf *) overlay;
+ s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
+
+ if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
+ s->pending_buf == Z_NULL) {
+ strm->msg = (char*)ERR_MSG(Z_MEM_ERROR);
+ deflateEnd (strm);
+ return Z_MEM_ERROR;
+ }
+ s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
+ s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
+
+ s->level = level;
+ s->strategy = strategy;
+ s->method = (Byte)method;
+
+ return deflateReset(strm);
+}
+
+/* ========================================================================= */
+int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength)
+ z_streamp strm;
+ const Bytef *dictionary;
+ uInt dictLength;
+{
+ deflate_state *s;
+ uInt length = dictLength;
+ uInt n;
+ IPos hash_head = 0;
+
+ if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL ||
+ strm->state->status != INIT_STATE) return Z_STREAM_ERROR;
+
+ s = strm->state;
+ strm->adler = adler32(strm->adler, dictionary, dictLength);
+
+ if (length < MIN_MATCH) return Z_OK;
+ if (length > MAX_DIST(s)) {
+ length = MAX_DIST(s);
+#ifndef USE_DICT_HEAD
+ dictionary += dictLength - length; /* use the tail of the dictionary */
+#endif
+ }
+ zmemcpy(s->window, dictionary, length);
+ s->strstart = length;
+ s->block_start = (long)length;
+
+ /* Insert all strings in the hash table (except for the last two bytes).
+ * s->lookahead stays null, so s->ins_h will be recomputed at the next
+ * call of fill_window.
+ */
+ s->ins_h = s->window[0];
+ UPDATE_HASH(s, s->ins_h, s->window[1]);
+ for (n = 0; n <= length - MIN_MATCH; n++) {
+ INSERT_STRING(s, n, hash_head);
+ }
+ if (hash_head) hash_head = 0; /* to make compiler happy */
+ return Z_OK;
+}
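
/* Usage sketch, not part of this change: as the status check above enforces,
 * deflateSetDictionary() may only be called right after deflateInit() and
 * before the first call to deflate().  The decompressing side must pass the
 * same bytes to inflateSetDictionary() when inflate() returns Z_NEED_DICT.
 * The helper name and the dict/dict_len parameters are hypothetical. */
#include <string.h>
#include "zlib.h"

int start_with_dictionary(z_stream *strm, const Bytef *dict, uInt dict_len)
{
    int ret;
    memset(strm, 0, sizeof(*strm));          /* zalloc/zfree/opaque = Z_NULL */
    ret = deflateInit(strm, Z_BEST_COMPRESSION);
    if (ret != Z_OK) return ret;
    return deflateSetDictionary(strm, dict, dict_len);
}
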
+
+/* ========================================================================= */
+int ZEXPORT deflateReset (strm)
+ z_streamp strm;
+{
+ deflate_state *s;
+
+ if (strm == Z_NULL || strm->state == Z_NULL ||
+ strm->zalloc == Z_NULL || strm->zfree == Z_NULL) return Z_STREAM_ERROR;
+
+ strm->total_in = strm->total_out = 0;
+ strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */
+ strm->data_type = Z_UNKNOWN;
+
+ s = (deflate_state *)strm->state;
+ s->pending = 0;
+ s->pending_out = s->pending_buf;
+
+ if (s->noheader < 0) {
+ s->noheader = 0; /* was set to -1 by deflate(..., Z_FINISH); */
+ }
+ s->status = s->noheader ? BUSY_STATE : INIT_STATE;
+ strm->adler = 1;
+ s->last_flush = Z_NO_FLUSH;
+
+ _tr_init(s);
+ lm_init(s);
+
+ return Z_OK;
+}
+
+/* ========================================================================= */
+int ZEXPORT deflateParams(strm, level, strategy)
+ z_streamp strm;
+ int level;
+ int strategy;
+{
+ deflate_state *s;
+ compress_func func;
+ int err = Z_OK;
+
+ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+ s = strm->state;
+
+ if (level == Z_DEFAULT_COMPRESSION) {
+ level = 6;
+ }
+ if (level < 0 || level > 9 || strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
+ return Z_STREAM_ERROR;
+ }
+ func = configuration_table[s->level].func;
+
+ if (func != configuration_table[level].func && strm->total_in != 0) {
+ /* Flush the last buffer: */
+ err = deflate(strm, Z_PARTIAL_FLUSH);
+ }
+ if (s->level != level) {
+ s->level = level;
+ s->max_lazy_match = configuration_table[level].max_lazy;
+ s->good_match = configuration_table[level].good_length;
+ s->nice_match = configuration_table[level].nice_length;
+ s->max_chain_length = configuration_table[level].max_chain;
+ }
+ s->strategy = strategy;
+ return err;
+}
+
+/* =========================================================================
+ * Put a short in the pending buffer. The 16-bit value is put in MSB order.
+ * IN assertion: the stream state is correct and there is enough room in
+ * pending_buf.
+ */
+local void putShortMSB (s, b)
+ deflate_state *s;
+ uInt b;
+{
+ put_byte(s, (Byte)(b >> 8));
+ put_byte(s, (Byte)(b & 0xff));
+}
+
+/* =========================================================================
+ * Flush as much pending output as possible. All deflate() output goes
+ * through this function so some applications may wish to modify it
+ * to avoid allocating a large strm->next_out buffer and copying into it.
+ * (See also read_buf()).
+ */
+local void flush_pending(strm)
+ z_streamp strm;
+{
+ unsigned len = strm->state->pending;
+
+ if (len > strm->avail_out) len = strm->avail_out;
+ if (len == 0) return;
+
+ zmemcpy(strm->next_out, strm->state->pending_out, len);
+ strm->next_out += len;
+ strm->state->pending_out += len;
+ strm->total_out += len;
+ strm->avail_out -= len;
+ strm->state->pending -= len;
+ if (strm->state->pending == 0) {
+ strm->state->pending_out = strm->state->pending_buf;
+ }
+}
+
+/* ========================================================================= */
+int ZEXPORT deflate (strm, flush)
+ z_streamp strm;
+ int flush;
+{
+ int old_flush; /* value of flush param for previous deflate call */
+ deflate_state *s;
+
+ if (strm == Z_NULL || strm->state == Z_NULL ||
+ flush > Z_FINISH || flush < 0) {
+ return Z_STREAM_ERROR;
+ }
+ s = strm->state;
+
+ if (strm->next_out == Z_NULL ||
+ (strm->next_in == Z_NULL && strm->avail_in != 0) ||
+ (s->status == FINISH_STATE && flush != Z_FINISH)) {
+ ERR_RETURN(strm, Z_STREAM_ERROR);
+ }
+ if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR);
+
+ s->strm = strm; /* just in case */
+ old_flush = s->last_flush;
+ s->last_flush = flush;
+
+ /* Write the zlib header */
+ if (s->status == INIT_STATE) {
+
+ uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
+ uInt level_flags = (s->level-1) >> 1;
+
+ if (level_flags > 3) level_flags = 3;
+ header |= (level_flags << 6);
+ if (s->strstart != 0) header |= PRESET_DICT;
+ header += 31 - (header % 31);
+
+ s->status = BUSY_STATE;
+ putShortMSB(s, header);
+
+ /* Save the adler32 of the preset dictionary: */
+ if (s->strstart != 0) {
+ putShortMSB(s, (uInt)(strm->adler >> 16));
+ putShortMSB(s, (uInt)(strm->adler & 0xffff));
+ }
+ strm->adler = 1L;
+ }
+
+ /* Flush as much pending output as possible */
+ if (s->pending != 0) {
+ flush_pending(strm);
+ if (strm->avail_out == 0) {
+ /* Since avail_out is 0, deflate will be called again with
+ * more output space, but possibly with both pending and
+ * avail_in equal to zero. There won't be anything to do,
+ * but this is not an error situation so make sure we
+ * return OK instead of BUF_ERROR at next call of deflate:
+ */
+ s->last_flush = -1;
+ return Z_OK;
+ }
+
+ /* Make sure there is something to do and avoid duplicate consecutive
+ * flushes. For repeated and useless calls with Z_FINISH, we keep
+     * returning Z_STREAM_END instead of Z_BUF_ERROR.
+ */
+ } else if (strm->avail_in == 0 && flush <= old_flush &&
+ flush != Z_FINISH) {
+ ERR_RETURN(strm, Z_BUF_ERROR);
+ }
+
+ /* User must not provide more input after the first FINISH: */
+ if (s->status == FINISH_STATE && strm->avail_in != 0) {
+ ERR_RETURN(strm, Z_BUF_ERROR);
+ }
+
+ /* Start a new block or continue the current one.
+ */
+ if (strm->avail_in != 0 || s->lookahead != 0 ||
+ (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
+ block_state bstate;
+
+ bstate = (*(configuration_table[s->level].func))(s, flush);
+
+ if (bstate == finish_started || bstate == finish_done) {
+ s->status = FINISH_STATE;
+ }
+ if (bstate == need_more || bstate == finish_started) {
+ if (strm->avail_out == 0) {
+ s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
+ }
+ return Z_OK;
+ /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
+ * of deflate should use the same flush parameter to make sure
+ * that the flush is complete. So we don't have to output an
+ * empty block here, this will be done at next call. This also
+ * ensures that for a very small output buffer, we emit at most
+ * one empty block.
+ */
+ }
+ if (bstate == block_done) {
+ if (flush == Z_PARTIAL_FLUSH) {
+ _tr_align(s);
+ } else { /* FULL_FLUSH or SYNC_FLUSH */
+ _tr_stored_block(s, (char*)0, 0L, 0);
+ /* For a full flush, this empty block will be recognized
+ * as a special marker by inflate_sync().
+ */
+ if (flush == Z_FULL_FLUSH) {
+ CLEAR_HASH(s); /* forget history */
+ }
+ }
+ flush_pending(strm);
+ if (strm->avail_out == 0) {
+ s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
+ return Z_OK;
+ }
+ }
+ }
+ Assert(strm->avail_out > 0, "bug2");
+
+ if (flush != Z_FINISH) return Z_OK;
+ if (s->noheader) return Z_STREAM_END;
+
+ /* Write the zlib trailer (adler32) */
+ putShortMSB(s, (uInt)(strm->adler >> 16));
+ putShortMSB(s, (uInt)(strm->adler & 0xffff));
+ flush_pending(strm);
+ /* If avail_out is zero, the application will call deflate again
+ * to flush the rest.
+ */
+ s->noheader = -1; /* write the trailer only once! */
+ return s->pending != 0 ? Z_OK : Z_STREAM_END;
+}
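
/* Caller sketch using only the public zlib API, not part of this change.
 * The comments in deflate() above describe the contract: keep calling while
 * there is pending output, and with Z_FINISH keep calling until Z_STREAM_END
 * is returned.  This minimal version assumes the compressed data fits in the
 * caller's output buffer in a single pass; a production caller would loop on
 * avail_out.  compress_once() is a hypothetical helper name. */
#include <string.h>
#include "zlib.h"

int compress_once(const unsigned char *in, uLong in_len,
                  unsigned char *out, uLong *out_len)
{
    z_stream strm;
    int ret;

    memset(&strm, 0, sizeof(strm));          /* zalloc/zfree/opaque = Z_NULL */
    ret = deflateInit(&strm, Z_DEFAULT_COMPRESSION);
    if (ret != Z_OK) return ret;

    strm.next_in   = (Bytef *)in;
    strm.avail_in  = (uInt)in_len;
    strm.next_out  = out;
    strm.avail_out = (uInt)*out_len;

    ret = deflate(&strm, Z_FINISH);          /* single pass, see comments above */
    if (ret != Z_STREAM_END) {
        deflateEnd(&strm);
        return ret == Z_OK ? Z_BUF_ERROR : ret;
    }
    *out_len = strm.total_out;
    return deflateEnd(&strm);
}
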
+
+/* ========================================================================= */
+int ZEXPORT deflateEnd (strm)
+ z_streamp strm;
+{
+ int status;
+
+ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+
+ status = strm->state->status;
+ if (status != INIT_STATE && status != BUSY_STATE &&
+ status != FINISH_STATE) {
+ return Z_STREAM_ERROR;
+ }
+
+ /* Deallocate in reverse order of allocations: */
+ TRY_FREE(strm, strm->state->pending_buf);
+ TRY_FREE(strm, strm->state->head);
+ TRY_FREE(strm, strm->state->prev);
+ TRY_FREE(strm, strm->state->window);
+
+ ZFREE(strm, strm->state);
+ strm->state = Z_NULL;
+
+ return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
+}
+
+/* =========================================================================
+ * Copy the source state to the destination state.
+ * To simplify the source, this is not supported for 16-bit MSDOS (which
+ * doesn't have enough memory anyway to duplicate compression states).
+ */
+int ZEXPORT deflateCopy (dest, source)
+ z_streamp dest;
+ z_streamp source;
+{
+#ifdef MAXSEG_64K
+ return Z_STREAM_ERROR;
+#else
+ deflate_state *ds;
+ deflate_state *ss;
+ ushf *overlay;
+
+
+ if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL) {
+ return Z_STREAM_ERROR;
+ }
+
+ ss = source->state;
+
+ *dest = *source;
+
+ ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state));
+ if (ds == Z_NULL) return Z_MEM_ERROR;
+ dest->state = (struct internal_state FAR *) ds;
+ *ds = *ss;
+ ds->strm = dest;
+
+ ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
+ ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos));
+ ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos));
+ overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2);
+ ds->pending_buf = (uchf *) overlay;
+
+ if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
+ ds->pending_buf == Z_NULL) {
+ deflateEnd (dest);
+ return Z_MEM_ERROR;
+ }
+    /* the following zmemcpy calls do not work for 16-bit MSDOS */
+ zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
+ zmemcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos));
+ zmemcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos));
+ zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
+
+ ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
+ ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
+ ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;
+
+ ds->l_desc.dyn_tree = ds->dyn_ltree;
+ ds->d_desc.dyn_tree = ds->dyn_dtree;
+ ds->bl_desc.dyn_tree = ds->bl_tree;
+
+ return Z_OK;
+#endif
+}
+
+/* ===========================================================================
+ * Read a new buffer from the current input stream, update the adler32
+ * and total number of bytes read. All deflate() input goes through
+ * this function so some applications may wish to modify it to avoid
+ * allocating a large strm->next_in buffer and copying from it.
+ * (See also flush_pending()).
+ */
+local int read_buf(strm, buf, size)
+ z_streamp strm;
+ Bytef *buf;
+ unsigned size;
+{
+ unsigned len = strm->avail_in;
+
+ if (len > size) len = size;
+ if (len == 0) return 0;
+
+ strm->avail_in -= len;
+
+ if (!strm->state->noheader) {
+ strm->adler = adler32(strm->adler, strm->next_in, len);
+ }
+ zmemcpy(buf, strm->next_in, len);
+ strm->next_in += len;
+ strm->total_in += len;
+
+ return (int)len;
+}
+
+/* ===========================================================================
+ * Initialize the "longest match" routines for a new zlib stream
+ */
+local void lm_init (s)
+ deflate_state *s;
+{
+ s->window_size = (ulg)2L*s->w_size;
+
+ CLEAR_HASH(s);
+
+ /* Set the default configuration parameters:
+ */
+ s->max_lazy_match = configuration_table[s->level].max_lazy;
+ s->good_match = configuration_table[s->level].good_length;
+ s->nice_match = configuration_table[s->level].nice_length;
+ s->max_chain_length = configuration_table[s->level].max_chain;
+
+ s->strstart = 0;
+ s->block_start = 0L;
+ s->lookahead = 0;
+ s->match_length = s->prev_length = MIN_MATCH-1;
+ s->match_available = 0;
+ s->ins_h = 0;
+#ifdef ASMV
+ match_init(); /* initialize the asm code */
+#endif
+}
+
+/* ===========================================================================
+ * Set match_start to the longest match starting at the given string and
+ * return its length. Matches shorter or equal to prev_length are discarded,
+ * in which case the result is equal to prev_length and match_start is
+ * garbage.
+ * IN assertions: cur_match is the head of the hash chain for the current
+ * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
+ * OUT assertion: the match length is not greater than s->lookahead.
+ */
+#ifndef ASMV
+/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
+ * match.S. The code will be functionally equivalent.
+ */
+#ifndef FASTEST
+local uInt longest_match(s, cur_match)
+ deflate_state *s;
+ IPos cur_match; /* current match */
+{
+ unsigned chain_length = s->max_chain_length;/* max hash chain length */
+ register Bytef *scan = s->window + s->strstart; /* current string */
+ register Bytef *match; /* matched string */
+ register int len; /* length of current match */
+ int best_len = s->prev_length; /* best match length so far */
+ int nice_match = s->nice_match; /* stop if match long enough */
+ IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
+ s->strstart - (IPos)MAX_DIST(s) : NIL;
+ /* Stop when cur_match becomes <= limit. To simplify the code,
+ * we prevent matches with the string of window index 0.
+ */
+ Posf *prev = s->prev;
+ uInt wmask = s->w_mask;
+
+#ifdef UNALIGNED_OK
+ /* Compare two bytes at a time. Note: this is not always beneficial.
+ * Try with and without -DUNALIGNED_OK to check.
+ */
+ register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
+ register ush scan_start = *(ushf*)scan;
+ register ush scan_end = *(ushf*)(scan+best_len-1);
+#else
+ register Bytef *strend = s->window + s->strstart + MAX_MATCH;
+ register Byte scan_end1 = scan[best_len-1];
+ register Byte scan_end = scan[best_len];
+#endif
+
+ /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
+ * It is easy to get rid of this optimization if necessary.
+ */
+ Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
+
+ /* Do not waste too much time if we already have a good match: */
+ if (s->prev_length >= s->good_match) {
+ chain_length >>= 2;
+ }
+ /* Do not look for matches beyond the end of the input. This is necessary
+ * to make deflate deterministic.
+ */
+ if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;
+
+ Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
+
+ do {
+ Assert(cur_match < s->strstart, "no future");
+ match = s->window + cur_match;
+
+ /* Skip to next match if the match length cannot increase
+ * or if the match length is less than 2:
+ */
+#if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
+ /* This code assumes sizeof(unsigned short) == 2. Do not use
+ * UNALIGNED_OK if your compiler uses a different size.
+ */
+ if (*(ushf*)(match+best_len-1) != scan_end ||
+ *(ushf*)match != scan_start) continue;
+
+ /* It is not necessary to compare scan[2] and match[2] since they are
+ * always equal when the other bytes match, given that the hash keys
+ * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
+ * strstart+3, +5, ... up to strstart+257. We check for insufficient
+ * lookahead only every 4th comparison; the 128th check will be made
+ * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
+ * necessary to put more guard bytes at the end of the window, or
+ * to check more often for insufficient lookahead.
+ */
+ Assert(scan[2] == match[2], "scan[2]?");
+ scan++, match++;
+ do {
+ } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+ *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+ *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+ *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+ scan < strend);
+ /* The funny "do {}" generates better code on most compilers */
+
+ /* Here, scan <= window+strstart+257 */
+ Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+ if (*scan == *match) scan++;
+
+ len = (MAX_MATCH - 1) - (int)(strend-scan);
+ scan = strend - (MAX_MATCH-1);
+
+#else /* UNALIGNED_OK */
+
+ if (match[best_len] != scan_end ||
+ match[best_len-1] != scan_end1 ||
+ *match != *scan ||
+ *++match != scan[1]) continue;
+
+ /* The check at best_len-1 can be removed because it will be made
+ * again later. (This heuristic is not always a win.)
+ * It is not necessary to compare scan[2] and match[2] since they
+ * are always equal when the other bytes match, given that
+ * the hash keys are equal and that HASH_BITS >= 8.
+ */
+ scan += 2, match++;
+ Assert(*scan == *match, "match[2]?");
+
+ /* We check for insufficient lookahead only every 8th comparison;
+ * the 256th check will be made at strstart+258.
+ */
+ do {
+ } while (*++scan == *++match && *++scan == *++match &&
+ *++scan == *++match && *++scan == *++match &&
+ *++scan == *++match && *++scan == *++match &&
+ *++scan == *++match && *++scan == *++match &&
+ scan < strend);
+
+ Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+
+ len = MAX_MATCH - (int)(strend - scan);
+ scan = strend - MAX_MATCH;
+
+#endif /* UNALIGNED_OK */
+
+ if (len > best_len) {
+ s->match_start = cur_match;
+ best_len = len;
+ if (len >= nice_match) break;
+#ifdef UNALIGNED_OK
+ scan_end = *(ushf*)(scan+best_len-1);
+#else
+ scan_end1 = scan[best_len-1];
+ scan_end = scan[best_len];
+#endif
+ }
+ } while ((cur_match = prev[cur_match & wmask]) > limit
+ && --chain_length != 0);
+
+ if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
+ return s->lookahead;
+}
+
+#else /* FASTEST */
+/* ---------------------------------------------------------------------------
+ * Optimized version for level == 1 only
+ */
+local uInt longest_match(s, cur_match)
+ deflate_state *s;
+ IPos cur_match; /* current match */
+{
+ register Bytef *scan = s->window + s->strstart; /* current string */
+ register Bytef *match; /* matched string */
+ register int len; /* length of current match */
+ register Bytef *strend = s->window + s->strstart + MAX_MATCH;
+
+ /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
+ * It is easy to get rid of this optimization if necessary.
+ */
+ Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
+
+ Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
+
+ Assert(cur_match < s->strstart, "no future");
+
+ match = s->window + cur_match;
+
+ /* Return failure if the match length is less than 2:
+ */
+ if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1;
+
+ /* The check at best_len-1 can be removed because it will be made
+ * again later. (This heuristic is not always a win.)
+ * It is not necessary to compare scan[2] and match[2] since they
+ * are always equal when the other bytes match, given that
+ * the hash keys are equal and that HASH_BITS >= 8.
+ */
+ scan += 2, match += 2;
+ Assert(*scan == *match, "match[2]?");
+
+ /* We check for insufficient lookahead only every 8th comparison;
+ * the 256th check will be made at strstart+258.
+ */
+ do {
+ } while (*++scan == *++match && *++scan == *++match &&
+ *++scan == *++match && *++scan == *++match &&
+ *++scan == *++match && *++scan == *++match &&
+ *++scan == *++match && *++scan == *++match &&
+ scan < strend);
+
+ Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+
+ len = MAX_MATCH - (int)(strend - scan);
+
+ if (len < MIN_MATCH) return MIN_MATCH - 1;
+
+ s->match_start = cur_match;
+ return len <= s->lookahead ? len : s->lookahead;
+}
+#endif /* FASTEST */
+#endif /* ASMV */
+
+#ifdef DEBUG
+/* ===========================================================================
+ * Check that the match at match_start is indeed a match.
+ */
+local void check_match(s, start, match, length)
+ deflate_state *s;
+ IPos start, match;
+ int length;
+{
+ /* check that the match is indeed a match */
+ if (zmemcmp(s->window + match,
+ s->window + start, length) != EQUAL) {
+ fprintf(stderr, " start %u, match %u, length %d\n",
+ start, match, length);
+ do {
+ fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
+ } while (--length != 0);
+ z_error("invalid match");
+ }
+ if (z_verbose > 1) {
+ fprintf(stderr,"\\[%d,%d]", start-match, length);
+ do { putc(s->window[start++], stderr); } while (--length != 0);
+ }
+}
+#else
+# define check_match(s, start, match, length)
+#endif
+
+/* ===========================================================================
+ * Fill the window when the lookahead becomes insufficient.
+ * Updates strstart and lookahead.
+ *
+ * IN assertion: lookahead < MIN_LOOKAHEAD
+ * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
+ * At least one byte has been read, or avail_in == 0; reads are
+ * performed for at least two bytes (required for the zip translate_eol
+ * option -- not supported here).
+ */
+local void fill_window(s)
+ deflate_state *s;
+{
+ register unsigned n, m;
+ register Posf *p;
+ unsigned more; /* Amount of free space at the end of the window. */
+ uInt wsize = s->w_size;
+
+ do {
+ more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
+
+ /* Deal with !@#$% 64K limit: */
+ if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
+ more = wsize;
+
+ } else if (more == (unsigned)(-1)) {
+ /* Very unlikely, but possible on 16 bit machine if strstart == 0
+ * and lookahead == 1 (input done one byte at time)
+ */
+ more--;
+
+ /* If the window is almost full and there is insufficient lookahead,
+ * move the upper half to the lower one to make room in the upper half.
+ */
+ } else if (s->strstart >= wsize+MAX_DIST(s)) {
+
+ zmemcpy(s->window, s->window+wsize, (unsigned)wsize);
+ s->match_start -= wsize;
+ s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
+ s->block_start -= (long) wsize;
+
+ /* Slide the hash table (could be avoided with 32 bit values
+ at the expense of memory usage). We slide even when level == 0
+ to keep the hash table consistent if we switch back to level > 0
+ later. (Using level 0 permanently is not an optimal usage of
+ zlib, so we don't care about this pathological case.)
+ */
+ n = s->hash_size;
+ p = &s->head[n];
+ do {
+ m = *--p;
+ *p = (Pos)(m >= wsize ? m-wsize : NIL);
+ } while (--n);
+
+ n = wsize;
+#ifndef FASTEST
+ p = &s->prev[n];
+ do {
+ m = *--p;
+ *p = (Pos)(m >= wsize ? m-wsize : NIL);
+ /* If n is not on any hash chain, prev[n] is garbage but
+ * its value will never be used.
+ */
+ } while (--n);
+#endif
+ more += wsize;
+ }
+ if (s->strm->avail_in == 0) return;
+
+ /* If there was no sliding:
+ * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
+ * more == window_size - lookahead - strstart
+ * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
+ * => more >= window_size - 2*WSIZE + 2
+ * In the BIG_MEM or MMAP case (not yet supported),
+ * window_size == input_size + MIN_LOOKAHEAD &&
+ * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
+ * Otherwise, window_size == 2*WSIZE so more >= 2.
+ * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
+ */
+ Assert(more >= 2, "more < 2");
+
+ n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
+ s->lookahead += n;
+
+ /* Initialize the hash value now that we have some input: */
+ if (s->lookahead >= MIN_MATCH) {
+ s->ins_h = s->window[s->strstart];
+ UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
+#if MIN_MATCH != 3
+ Call UPDATE_HASH() MIN_MATCH-3 more times
+#endif
+ }
+ /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
+ * but this is not important since only literal bytes will be emitted.
+ */
+
+ } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
+}
+
+/* ===========================================================================
+ * Flush the current block, with given end-of-file flag.
+ * IN assertion: strstart is set to the end of the current match.
+ */
+#define FLUSH_BLOCK_ONLY(s, eof) { \
+ _tr_flush_block(s, (s->block_start >= 0L ? \
+ (charf *)&s->window[(unsigned)s->block_start] : \
+ (charf *)Z_NULL), \
+ (ulg)((long)s->strstart - s->block_start), \
+ (eof)); \
+ s->block_start = s->strstart; \
+ flush_pending(s->strm); \
+ Tracev((stderr,"[FLUSH]")); \
+}
+
+/* Same but force premature exit if necessary. */
+#define FLUSH_BLOCK(s, eof) { \
+ FLUSH_BLOCK_ONLY(s, eof); \
+ if (s->strm->avail_out == 0) return (eof) ? finish_started : need_more; \
+}
+
+/* ===========================================================================
+ * Copy without compression as much as possible from the input stream, return
+ * the current block state.
+ * This function does not insert new strings in the dictionary since
+ * uncompressible data is probably not useful. This function is used
+ * only for the level=0 compression option.
+ * NOTE: this function should be optimized to avoid extra copying from
+ * window to pending_buf.
+ */
+local block_state deflate_stored(s, flush)
+ deflate_state *s;
+ int flush;
+{
+ /* Stored blocks are limited to 0xffff bytes, pending_buf is limited
+ * to pending_buf_size, and each stored block has a 5 byte header:
+ */
+ ulg max_block_size = 0xffff;
+ ulg max_start;
+
+ if (max_block_size > s->pending_buf_size - 5) {
+ max_block_size = s->pending_buf_size - 5;
+ }
+
+ /* Copy as much as possible from input to output: */
+ for (;;) {
+ /* Fill the window as much as possible: */
+ if (s->lookahead <= 1) {
+
+ Assert(s->strstart < s->w_size+MAX_DIST(s) ||
+ s->block_start >= (long)s->w_size, "slide too late");
+
+ fill_window(s);
+ if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more;
+
+ if (s->lookahead == 0) break; /* flush the current block */
+ }
+ Assert(s->block_start >= 0L, "block gone");
+
+ s->strstart += s->lookahead;
+ s->lookahead = 0;
+
+ /* Emit a stored block if pending_buf will be full: */
+ max_start = s->block_start + max_block_size;
+ if (s->strstart == 0 || (ulg)s->strstart >= max_start) {
+ /* strstart == 0 is possible when wraparound on 16-bit machine */
+ s->lookahead = (uInt)(s->strstart - max_start);
+ s->strstart = (uInt)max_start;
+ FLUSH_BLOCK(s, 0);
+ }
+ /* Flush if we may have to slide, otherwise block_start may become
+ * negative and the data will be gone:
+ */
+ if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) {
+ FLUSH_BLOCK(s, 0);
+ }
+ }
+ FLUSH_BLOCK(s, flush == Z_FINISH);
+ return flush == Z_FINISH ? finish_done : block_done;
+}
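
The stored path above is only reached at compression level 0. As a hedged illustration of how that level is selected from the public API (this sketch is not part of the committed sources; it assumes the linked zlib provides compress2(), and the sample data and buffer slack are arbitrary; link with -lz):

#include <stdio.h>
#include <zlib.h>

int main(void)
{
    const Bytef src[] = "data that is framed verbatim when the level is 0";
    Bytef dest[sizeof(src) + 64];   /* stored output is input plus a little framing */
    uLongf destLen = sizeof(dest);
    int ret;

    /* Level 0 makes deflate() take the deflate_stored() path: no matching,
     * the input is copied into stored blocks, each with a 5-byte header. */
    ret = compress2(dest, &destLen, src, (uLong)sizeof(src), 0);
    printf("ret=%d, %lu bytes in -> %lu bytes out\n",
           ret, (unsigned long)sizeof(src), (unsigned long)destLen);
    return 0;
}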
+
+/* ===========================================================================
+ * Compress as much as possible from the input stream, return the current
+ * block state.
+ * This function does not perform lazy evaluation of matches and inserts
+ * new strings in the dictionary only for unmatched strings or for short
+ * matches. It is used only for the fast compression options.
+ */
+local block_state deflate_fast(s, flush)
+ deflate_state *s;
+ int flush;
+{
+ IPos hash_head = NIL; /* head of the hash chain */
+ int bflush; /* set if current block must be flushed */
+
+ for (;;) {
+ /* Make sure that we always have enough lookahead, except
+ * at the end of the input file. We need MAX_MATCH bytes
+ * for the next match, plus MIN_MATCH bytes to insert the
+ * string following the next match.
+ */
+ if (s->lookahead < MIN_LOOKAHEAD) {
+ fill_window(s);
+ if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
+ return need_more;
+ }
+ if (s->lookahead == 0) break; /* flush the current block */
+ }
+
+ /* Insert the string window[strstart .. strstart+2] in the
+ * dictionary, and set hash_head to the head of the hash chain:
+ */
+ if (s->lookahead >= MIN_MATCH) {
+ INSERT_STRING(s, s->strstart, hash_head);
+ }
+
+ /* Find the longest match, discarding those <= prev_length.
+ * At this point we always have match_length < MIN_MATCH
+ */
+ if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
+ /* To simplify the code, we prevent matches with the string
+ * of window index 0 (in particular we have to avoid a match
+ * of the string with itself at the start of the input file).
+ */
+ if (s->strategy != Z_HUFFMAN_ONLY) {
+ s->match_length = longest_match (s, hash_head);
+ }
+ /* longest_match() sets match_start */
+ }
+ if (s->match_length >= MIN_MATCH) {
+ check_match(s, s->strstart, s->match_start, s->match_length);
+
+ _tr_tally_dist(s, s->strstart - s->match_start,
+ s->match_length - MIN_MATCH, bflush);
+
+ s->lookahead -= s->match_length;
+
+ /* Insert new strings in the hash table only if the match length
+ * is not too large. This saves time but degrades compression.
+ */
+#ifndef FASTEST
+ if (s->match_length <= s->max_insert_length &&
+ s->lookahead >= MIN_MATCH) {
+ s->match_length--; /* string at strstart already in hash table */
+ do {
+ s->strstart++;
+ INSERT_STRING(s, s->strstart, hash_head);
+ /* strstart never exceeds WSIZE-MAX_MATCH, so there are
+ * always MIN_MATCH bytes ahead.
+ */
+ } while (--s->match_length != 0);
+ s->strstart++;
+ } else
+#endif
+ {
+ s->strstart += s->match_length;
+ s->match_length = 0;
+ s->ins_h = s->window[s->strstart];
+ UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
+#if MIN_MATCH != 3
+ Call UPDATE_HASH() MIN_MATCH-3 more times
+#endif
+ /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
+ * matter since it will be recomputed at next deflate call.
+ */
+ }
+ } else {
+ /* No match, output a literal byte */
+ Tracevv((stderr,"%c", s->window[s->strstart]));
+ _tr_tally_lit (s, s->window[s->strstart], bflush);
+ s->lookahead--;
+ s->strstart++;
+ }
+ if (bflush) FLUSH_BLOCK(s, 0);
+ }
+ FLUSH_BLOCK(s, flush == Z_FINISH);
+ return flush == Z_FINISH ? finish_done : block_done;
+}
+
+/* ===========================================================================
+ * Same as above, but achieves better compression. We use a lazy
+ * evaluation for matches: a match is finally adopted only if there is
+ * no better match at the next window position.
+ */
+local block_state deflate_slow(s, flush)
+ deflate_state *s;
+ int flush;
+{
+ IPos hash_head = NIL; /* head of hash chain */
+ int bflush; /* set if current block must be flushed */
+
+ /* Process the input block. */
+ for (;;) {
+ /* Make sure that we always have enough lookahead, except
+ * at the end of the input file. We need MAX_MATCH bytes
+ * for the next match, plus MIN_MATCH bytes to insert the
+ * string following the next match.
+ */
+ if (s->lookahead < MIN_LOOKAHEAD) {
+ fill_window(s);
+ if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
+ return need_more;
+ }
+ if (s->lookahead == 0) break; /* flush the current block */
+ }
+
+ /* Insert the string window[strstart .. strstart+2] in the
+ * dictionary, and set hash_head to the head of the hash chain:
+ */
+ if (s->lookahead >= MIN_MATCH) {
+ INSERT_STRING(s, s->strstart, hash_head);
+ }
+
+ /* Find the longest match, discarding those <= prev_length.
+ */
+ s->prev_length = s->match_length, s->prev_match = s->match_start;
+ s->match_length = MIN_MATCH-1;
+
+ if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
+ s->strstart - hash_head <= MAX_DIST(s)) {
+ /* To simplify the code, we prevent matches with the string
+ * of window index 0 (in particular we have to avoid a match
+ * of the string with itself at the start of the input file).
+ */
+ if (s->strategy != Z_HUFFMAN_ONLY) {
+ s->match_length = longest_match (s, hash_head);
+ }
+ /* longest_match() sets match_start */
+
+ if (s->match_length <= 5 && (s->strategy == Z_FILTERED ||
+ (s->match_length == MIN_MATCH &&
+ s->strstart - s->match_start > TOO_FAR))) {
+
+ /* If prev_match is also MIN_MATCH, match_start is garbage
+ * but we will ignore the current match anyway.
+ */
+ s->match_length = MIN_MATCH-1;
+ }
+ }
+ /* If there was a match at the previous step and the current
+ * match is not better, output the previous match:
+ */
+ if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
+ uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
+ /* Do not insert strings in hash table beyond this. */
+
+ check_match(s, s->strstart-1, s->prev_match, s->prev_length);
+
+ _tr_tally_dist(s, s->strstart -1 - s->prev_match,
+ s->prev_length - MIN_MATCH, bflush);
+
+ /* Insert in hash table all strings up to the end of the match.
+ * strstart-1 and strstart are already inserted. If there is not
+ * enough lookahead, the last two strings are not inserted in
+ * the hash table.
+ */
+ s->lookahead -= s->prev_length-1;
+ s->prev_length -= 2;
+ do {
+ if (++s->strstart <= max_insert) {
+ INSERT_STRING(s, s->strstart, hash_head);
+ }
+ } while (--s->prev_length != 0);
+ s->match_available = 0;
+ s->match_length = MIN_MATCH-1;
+ s->strstart++;
+
+ if (bflush) FLUSH_BLOCK(s, 0);
+
+ } else if (s->match_available) {
+ /* If there was no match at the previous position, output a
+ * single literal. If there was a match but the current match
+ * is longer, truncate the previous match to a single literal.
+ */
+ Tracevv((stderr,"%c", s->window[s->strstart-1]));
+ _tr_tally_lit(s, s->window[s->strstart-1], bflush);
+ if (bflush) {
+ FLUSH_BLOCK_ONLY(s, 0);
+ }
+ s->strstart++;
+ s->lookahead--;
+ if (s->strm->avail_out == 0) return need_more;
+ } else {
+ /* There is no previous match to compare with, wait for
+ * the next step to decide.
+ */
+ s->match_available = 1;
+ s->strstart++;
+ s->lookahead--;
+ }
+ }
+ Assert (flush != Z_NO_FLUSH, "no flush?");
+ if (s->match_available) {
+ Tracevv((stderr,"%c", s->window[s->strstart-1]));
+ _tr_tally_lit(s, s->window[s->strstart-1], bflush);
+ s->match_available = 0;
+ }
+ FLUSH_BLOCK(s, flush == Z_FINISH);
+ return flush == Z_FINISH ? finish_done : block_done;
+}
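
To make the greedy-versus-lazy distinction between deflate_fast() and deflate_slow() concrete, here is a small self-contained sketch (editorial illustration only, not part of the added files; a brute-force search stands in for longest_match(), and the sample string is contrived so that waiting one byte yields a longer match):

#include <stdio.h>
#include <string.h>

/* Length of the longest match for the suffix starting at pos against any
 * earlier position in text.  A brute-force stand-in for longest_match(),
 * for illustration only. */
static size_t match_len_at(const char *text, size_t pos)
{
    size_t best = 0, i, len, n = strlen(text);

    for (i = 0; i < pos; i++) {
        for (len = 0; pos + len < n && text[i + len] == text[pos + len]; len++)
            ;
        if (len > best) best = len;
    }
    return best;
}

int main(void)
{
    /* At index 10 ("abcdefg") the greedy choice is the 3-byte match "abc";
     * at index 11 a 5-byte match "bcdef" is available.  deflate_slow()
     * emits the literal 'a' and takes the longer match; deflate_fast()
     * would commit to the shorter one. */
    const char *text = "abc_bcdef_abcdefg";

    printf("match at 10: %lu\n", (unsigned long)match_len_at(text, 10));
    printf("match at 11: %lu\n", (unsigned long)match_len_at(text, 11));
    return 0;
}
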
Added: trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/zutil.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/zutil.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/zutil.c Sat Jul 13 19:22:41 2002
@@ -0,0 +1,225 @@
+/* zutil.c -- target dependent utility functions for the compression library
+ * Copyright (C) 1995-1998 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* @(#) $Id$ */
+
+#include "zutil.h"
+
+struct internal_state {int dummy;}; /* for buggy compilers */
+
+#ifndef STDC
+extern void exit OF((int));
+#endif
+
+const char *z_errmsg[10] = {
+"need dictionary", /* Z_NEED_DICT 2 */
+"stream end", /* Z_STREAM_END 1 */
+"", /* Z_OK 0 */
+"file error", /* Z_ERRNO (-1) */
+"stream error", /* Z_STREAM_ERROR (-2) */
+"data error", /* Z_DATA_ERROR (-3) */
+"insufficient memory", /* Z_MEM_ERROR (-4) */
+"buffer error", /* Z_BUF_ERROR (-5) */
+"incompatible version",/* Z_VERSION_ERROR (-6) */
+""};
+
+
+const char * ZEXPORT zlibVersion()
+{
+ return ZLIB_VERSION;
+}
+
+#ifdef DEBUG
+
+# ifndef verbose
+# define verbose 0
+# endif
+int z_verbose = verbose;
+
+void z_error (m)
+ char *m;
+{
+ fprintf(stderr, "%s\n", m);
+ exit(1);
+}
+#endif
+
+/* exported to allow conversion of error code to string for compress() and
+ * uncompress()
+ */
+const char * ZEXPORT zError(err)
+ int err;
+{
+ return ERR_MSG(err);
+}
+
+
+#ifndef HAVE_MEMCPY
+
+void zmemcpy(dest, source, len)
+ Bytef* dest;
+ const Bytef* source;
+ uInt len;
+{
+ if (len == 0) return;
+ do {
+ *dest++ = *source++; /* ??? to be unrolled */
+ } while (--len != 0);
+}
+
+int zmemcmp(s1, s2, len)
+ const Bytef* s1;
+ const Bytef* s2;
+ uInt len;
+{
+ uInt j;
+
+ for (j = 0; j < len; j++) {
+ if (s1[j] != s2[j]) return 2*(s1[j] > s2[j])-1;
+ }
+ return 0;
+}
+
+void zmemzero(dest, len)
+ Bytef* dest;
+ uInt len;
+{
+ if (len == 0) return;
+ do {
+ *dest++ = 0; /* ??? to be unrolled */
+ } while (--len != 0);
+}
+#endif
+
+#ifdef __TURBOC__
+#if (defined( __BORLANDC__) || !defined(SMALL_MEDIUM)) && !defined(__32BIT__)
+/* Small and medium model in Turbo C are for now limited to near allocation
+ * with reduced MAX_WBITS and MAX_MEM_LEVEL
+ */
+# define MY_ZCALLOC
+
+/* Turbo C malloc() does not allow dynamic allocation of 64K bytes
+ * and farmalloc(64K) returns a pointer with an offset of 8, so we
+ * must fix the pointer. Warning: the pointer must be put back to its
+ * original form in order to free it, use zcfree().
+ */
+
+#define MAX_PTR 10
+/* 10*64K = 640K */
+
+local int next_ptr = 0;
+
+typedef struct ptr_table_s {
+ voidpf org_ptr;
+ voidpf new_ptr;
+} ptr_table;
+
+local ptr_table table[MAX_PTR];
+/* This table is used to remember the original form of pointers
+ * to large buffers (64K). Such pointers are normalized with a zero offset.
+ * Since MSDOS is not a preemptive multitasking OS, this table is not
+ * protected from concurrent access. This hack doesn't work anyway on
+ * a protected system like OS/2. Use Microsoft C instead.
+ */
+
+voidpf zcalloc (voidpf opaque, unsigned items, unsigned size)
+{
+ voidpf buf = opaque; /* just to make some compilers happy */
+ ulg bsize = (ulg)items*size;
+
+ /* If we allocate less than 65520 bytes, we assume that farmalloc
+ * will return a usable pointer which doesn't have to be normalized.
+ */
+ if (bsize < 65520L) {
+ buf = farmalloc(bsize);
+ if (*(ush*)&buf != 0) return buf;
+ } else {
+ buf = farmalloc(bsize + 16L);
+ }
+ if (buf == NULL || next_ptr >= MAX_PTR) return NULL;
+ table[next_ptr].org_ptr = buf;
+
+ /* Normalize the pointer to seg:0 */
+ *((ush*)&buf+1) += ((ush)((uch*)buf-0) + 15) >> 4;
+ *(ush*)&buf = 0;
+ table[next_ptr++].new_ptr = buf;
+ return buf;
+}
+
+void zcfree (voidpf opaque, voidpf ptr)
+{
+ int n;
+ if (*(ush*)&ptr != 0) { /* object < 64K */
+ farfree(ptr);
+ return;
+ }
+ /* Find the original pointer */
+ for (n = 0; n < next_ptr; n++) {
+ if (ptr != table[n].new_ptr) continue;
+
+ farfree(table[n].org_ptr);
+ while (++n < next_ptr) {
+ table[n-1] = table[n];
+ }
+ next_ptr--;
+ return;
+ }
+ ptr = opaque; /* just to make some compilers happy */
+ Assert(0, "zcfree: ptr not found");
+}
+#endif
+#endif /* __TURBOC__ */
+
+
+#if defined(M_I86) && !defined(__32BIT__)
+/* Microsoft C in 16-bit mode */
+
+# define MY_ZCALLOC
+
+#if (!defined(_MSC_VER) || (_MSC_VER <= 600))
+# define _halloc halloc
+# define _hfree hfree
+#endif
+
+voidpf zcalloc (voidpf opaque, unsigned items, unsigned size)
+{
+ if (opaque) opaque = 0; /* to make compiler happy */
+ return _halloc((long)items, size);
+}
+
+void zcfree (voidpf opaque, voidpf ptr)
+{
+ if (opaque) opaque = 0; /* to make compiler happy */
+ _hfree(ptr);
+}
+
+#endif /* MSC */
+
+
+#ifndef MY_ZCALLOC /* Any system without a special alloc function */
+
+#ifndef STDC
+extern voidp calloc OF((uInt items, uInt size));
+extern void free OF((voidpf ptr));
+#endif
+
+voidpf zcalloc (opaque, items, size)
+ voidpf opaque;
+ unsigned items;
+ unsigned size;
+{
+ if (opaque) items += size - size; /* make compiler happy */
+ return (voidpf)calloc(items, size);
+}
+
+void zcfree (opaque, ptr)
+ voidpf opaque;
+ voidpf ptr;
+{
+ free(ptr);
+ if (opaque) return; /* make compiler happy */
+}
+
+#endif /* MY_ZCALLOC */
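
The z_errmsg[] table and zError() above are what the one-shot wrappers use for diagnostics. A minimal usage sketch, separate from the committed sources (assumes a system zlib linked with -lz; the undersized buffer is deliberate, to provoke Z_BUF_ERROR):

#include <stdio.h>
#include <zlib.h>

int main(void)
{
    const Bytef src[] = "an input string that will not fit into four output bytes";
    Bytef dest[4];                 /* deliberately too small */
    uLongf destLen = sizeof(dest);
    int ret;

    ret = compress(dest, &destLen, src, (uLong)sizeof(src));
    /* Expect Z_BUF_ERROR; zError() maps the code onto the z_errmsg[] text. */
    printf("compress() returned %d: %s\n", ret, zError(ret));
    return 0;
}
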
Added: trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/gzio.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/gzio.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/gzio.c Sat Jul 13 19:22:41 2002
@@ -0,0 +1,875 @@
+/* gzio.c -- IO on .gz files
+ * Copyright (C) 1995-1998 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ *
+ * Compile this file with -DNO_DEFLATE to avoid the compression code.
+ */
+
+/* @(#) $Id$ */
+
+#include <stdio.h>
+
+#include "zutil.h"
+
+struct internal_state {int dummy;}; /* for buggy compilers */
+
+#ifndef Z_BUFSIZE
+# ifdef MAXSEG_64K
+# define Z_BUFSIZE 4096 /* minimize memory usage for 16-bit DOS */
+# else
+# define Z_BUFSIZE 16384
+# endif
+#endif
+#ifndef Z_PRINTF_BUFSIZE
+# define Z_PRINTF_BUFSIZE 4096
+#endif
+
+#define ALLOC(size) malloc(size)
+#define TRYFREE(p) {if (p) free(p);}
+
+static int gz_magic[2] = {0x1f, 0x8b}; /* gzip magic header */
+
+/* gzip flag byte */
+#define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */
+#define HEAD_CRC 0x02 /* bit 1 set: header CRC present */
+#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
+#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
+#define COMMENT 0x10 /* bit 4 set: file comment present */
+#define RESERVED 0xE0 /* bits 5..7: reserved */
+
+typedef struct gz_stream {
+ z_stream stream;
+ int z_err; /* error code for last stream operation */
+ int z_eof; /* set if end of input file */
+ FILE *file; /* .gz file */
+ Byte *inbuf; /* input buffer */
+ Byte *outbuf; /* output buffer */
+ uLong crc; /* crc32 of uncompressed data */
+ char *msg; /* error message */
+ char *path; /* path name for debugging only */
+ int transparent; /* 1 if input file is not a .gz file */
+ char mode; /* 'w' or 'r' */
+ long startpos; /* start of compressed data in file (header skipped) */
+} gz_stream;
+
+
+local gzFile gz_open OF((const char *path, const char *mode, int fd));
+local int do_flush OF((gzFile file, int flush));
+local int get_byte OF((gz_stream *s));
+local void check_header OF((gz_stream *s));
+local int destroy OF((gz_stream *s));
+local void putLong OF((FILE *file, uLong x));
+local uLong getLong OF((gz_stream *s));
+
+/* ===========================================================================
+ Opens a gzip (.gz) file for reading or writing. The mode parameter
+ is as in fopen ("rb" or "wb"). The file is given either by file descriptor
+ or path name (if fd == -1).
+ gz_open returns NULL if the file could not be opened or if there was
+ insufficient memory to allocate the (de)compression state; errno
+ can be checked to distinguish the two cases (if errno is zero, the
+ zlib error is Z_MEM_ERROR).
+*/
+local gzFile gz_open (path, mode, fd)
+ const char *path;
+ const char *mode;
+ int fd;
+{
+ int err;
+ int level = Z_DEFAULT_COMPRESSION; /* compression level */
+ int strategy = Z_DEFAULT_STRATEGY; /* compression strategy */
+ char *p = (char*)mode;
+ gz_stream *s;
+ char fmode[80]; /* copy of mode, without the compression level */
+ char *m = fmode;
+
+ if (!path || !mode) return Z_NULL;
+
+ s = (gz_stream *)ALLOC(sizeof(gz_stream));
+ if (!s) return Z_NULL;
+
+ s->stream.zalloc = (alloc_func)0;
+ s->stream.zfree = (free_func)0;
+ s->stream.opaque = (voidpf)0;
+ s->stream.next_in = s->inbuf = Z_NULL;
+ s->stream.next_out = s->outbuf = Z_NULL;
+ s->stream.avail_in = s->stream.avail_out = 0;
+ s->file = NULL;
+ s->z_err = Z_OK;
+ s->z_eof = 0;
+ s->crc = crc32(0L, Z_NULL, 0);
+ s->msg = NULL;
+ s->transparent = 0;
+
+ s->path = (char*)ALLOC(strlen(path)+1);
+ if (s->path == NULL) {
+ return destroy(s), (gzFile)Z_NULL;
+ }
+ strcpy(s->path, path); /* do this early for debugging */
+
+ s->mode = '\0';
+ do {
+ if (*p == 'r') s->mode = 'r';
+ if (*p == 'w' || *p == 'a') s->mode = 'w';
+ if (*p >= '0' && *p <= '9') {
+ level = *p - '0';
+ } else if (*p == 'f') {
+ strategy = Z_FILTERED;
+ } else if (*p == 'h') {
+ strategy = Z_HUFFMAN_ONLY;
+ } else {
+ *m++ = *p; /* copy the mode */
+ }
+ } while (*p++ && m != fmode + sizeof(fmode));
+ if (s->mode == '\0') return destroy(s), (gzFile)Z_NULL;
+
+ if (s->mode == 'w') {
+#ifdef NO_DEFLATE
+ err = Z_STREAM_ERROR;
+#else
+ err = deflateInit2(&(s->stream), level,
+ Z_DEFLATED, -MAX_WBITS, DEF_MEM_LEVEL, strategy);
+ /* windowBits is passed < 0 to suppress zlib header */
+
+ s->stream.next_out = s->outbuf = (Byte*)ALLOC(Z_BUFSIZE);
+#endif
+ if (err != Z_OK || s->outbuf == Z_NULL) {
+ return destroy(s), (gzFile)Z_NULL;
+ }
+ } else {
+ s->stream.next_in = s->inbuf = (Byte*)ALLOC(Z_BUFSIZE);
+
+ err = inflateInit2(&(s->stream), -MAX_WBITS);
+ /* windowBits is passed < 0 to tell that there is no zlib header.
+ * Note that in this case inflate *requires* an extra "dummy" byte
+ * after the compressed stream in order to complete decompression and
+ * return Z_STREAM_END. Here the gzip CRC32 ensures that 4 bytes are
+ * present after the compressed stream.
+ */
+ if (err != Z_OK || s->inbuf == Z_NULL) {
+ return destroy(s), (gzFile)Z_NULL;
+ }
+ }
+ s->stream.avail_out = Z_BUFSIZE;
+
+ errno = 0;
+ s->file = fd < 0 ? F_OPEN(path, fmode) : (FILE*)fdopen(fd, fmode);
+
+ if (s->file == NULL) {
+ return destroy(s), (gzFile)Z_NULL;
+ }
+ if (s->mode == 'w') {
+ /* Write a very simple .gz header:
+ */
+ fprintf(s->file, "%c%c%c%c%c%c%c%c%c%c", gz_magic[0], gz_magic[1],
+ Z_DEFLATED, 0 /*flags*/, 0,0,0,0 /*time*/, 0 /*xflags*/, OS_CODE);
+ s->startpos = 10L;
+ * We use 10L instead of ftell(s->file) because ftell causes an
+ * fflush on some systems. This version of the library doesn't use
+ * startpos anyway in write mode, so this initialization is not
+ * necessary.
+ */
+ } else {
+ check_header(s); /* skip the .gz header */
+ s->startpos = (ftell(s->file) - s->stream.avail_in);
+ }
+
+ return (gzFile)s;
+}
+
+/* ===========================================================================
+ Opens a gzip (.gz) file for reading or writing.
+*/
+gzFile ZEXPORT gzopen (path, mode)
+ const char *path;
+ const char *mode;
+{
+ return gz_open (path, mode, -1);
+}
+
+/* ===========================================================================
+ Associate a gzFile with the file descriptor fd. fd is not dup'ed here
+ to mimic the behavio(u)r of fdopen.
+*/
+gzFile ZEXPORT gzdopen (fd, mode)
+ int fd;
+ const char *mode;
+{
+ char name[20];
+
+ if (fd < 0) return (gzFile)Z_NULL;
+ sprintf(name, "<fd:%d>", fd); /* for debugging */
+
+ return gz_open (name, mode, fd);
+}
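
Because gz_open() strips digits and the letters 'f'/'h' out of the mode string, callers can fold the level and strategy into the mode. A short sketch of both entry points (the file name and the use of open() are illustrative only; link with -lz):

#include <fcntl.h>
#include <stdio.h>
#include <zlib.h>

int main(void)
{
    gzFile out, in;
    char line[64];
    int fd;

    /* "wb9f": write, binary, level 9, Z_FILTERED -- the extra characters
     * are consumed by gz_open() and never reach fopen(). */
    out = gzopen("example.gz", "wb9f");
    if (out == NULL) { perror("gzopen"); return 1; }
    gzputs(out, "hello, gzip\n");
    gzclose(out);

    /* gzdopen() wraps an already-open descriptor; it is not dup'ed,
     * so the later gzclose() closes fd as well. */
    fd = open("example.gz", O_RDONLY);
    if (fd < 0) { perror("open"); return 1; }
    in = gzdopen(fd, "rb");
    if (in != NULL && gzgets(in, line, (int)sizeof(line)) != NULL)
        fputs(line, stdout);
    if (in != NULL) gzclose(in);
    return 0;
}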
+
+/* ===========================================================================
+ * Update the compression level and strategy
+ */
+int ZEXPORT gzsetparams (file, level, strategy)
+ gzFile file;
+ int level;
+ int strategy;
+{
+ gz_stream *s = (gz_stream*)file;
+
+ if (s == NULL || s->mode != 'w') return Z_STREAM_ERROR;
+
+ /* Make room to allow flushing */
+ if (s->stream.avail_out == 0) {
+
+ s->stream.next_out = s->outbuf;
+ if (fwrite(s->outbuf, 1, Z_BUFSIZE, s->file) != Z_BUFSIZE) {
+ s->z_err = Z_ERRNO;
+ }
+ s->stream.avail_out = Z_BUFSIZE;
+ }
+
+ return deflateParams (&(s->stream), level, strategy);
+}
+
+/* ===========================================================================
+ Read a byte from a gz_stream; update next_in and avail_in. Return EOF
+ for end of file.
+ IN assertion: the stream s has been successfully opened for reading.
+*/
+local int get_byte(s)
+ gz_stream *s;
+{
+ if (s->z_eof) return EOF;
+ if (s->stream.avail_in == 0) {
+ errno = 0;
+ s->stream.avail_in = fread(s->inbuf, 1, Z_BUFSIZE, s->file);
+ if (s->stream.avail_in == 0) {
+ s->z_eof = 1;
+ if (ferror(s->file)) s->z_err = Z_ERRNO;
+ return EOF;
+ }
+ s->stream.next_in = s->inbuf;
+ }
+ s->stream.avail_in--;
+ return *(s->stream.next_in)++;
+}
+
+/* ===========================================================================
+ Check the gzip header of a gz_stream opened for reading. Set the stream
+ mode to transparent if the gzip magic header is not present; set s->err
+ to Z_DATA_ERROR if the magic header is present but the rest of the header
+ is incorrect.
+ IN assertion: the stream s has already been created successfully;
+ s->stream.avail_in is zero for the first time, but may be non-zero
+ for concatenated .gz files.
+*/
+local void check_header(s)
+ gz_stream *s;
+{
+ int method; /* method byte */
+ int flags; /* flags byte */
+ uInt len;
+ int c;
+
+ /* Check the gzip magic header */
+ for (len = 0; len < 2; len++) {
+ c = get_byte(s);
+ if (c != gz_magic[len]) {
+ if (len != 0) s->stream.avail_in++, s->stream.next_in--;
+ if (c != EOF) {
+ s->stream.avail_in++, s->stream.next_in--;
+ s->transparent = 1;
+ }
+ s->z_err = s->stream.avail_in != 0 ? Z_OK : Z_STREAM_END;
+ return;
+ }
+ }
+ method = get_byte(s);
+ flags = get_byte(s);
+ if (method != Z_DEFLATED || (flags & RESERVED) != 0) {
+ s->z_err = Z_DATA_ERROR;
+ return;
+ }
+
+ /* Discard time, xflags and OS code: */
+ for (len = 0; len < 6; len++) (void)get_byte(s);
+
+ if ((flags & EXTRA_FIELD) != 0) { /* skip the extra field */
+ len = (uInt)get_byte(s);
+ len += ((uInt)get_byte(s))<<8;
+ /* len is garbage if EOF but the loop below will quit anyway */
+ while (len-- != 0 && get_byte(s) != EOF) ;
+ }
+ if ((flags & ORIG_NAME) != 0) { /* skip the original file name */
+ while ((c = get_byte(s)) != 0 && c != EOF) ;
+ }
+ if ((flags & COMMENT) != 0) { /* skip the .gz file comment */
+ while ((c = get_byte(s)) != 0 && c != EOF) ;
+ }
+ if ((flags & HEAD_CRC) != 0) { /* skip the header crc */
+ for (len = 0; len < 2; len++) (void)get_byte(s);
+ }
+ s->z_err = s->z_eof ? Z_DATA_ERROR : Z_OK;
+}
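
The flag handling in check_header() can be mirrored by hand on an in-memory header; the sketch below (editorial, no zlib calls) builds a toy 12-byte header with the magic, deflate method, FNAME flag and the name "a", and tests the same bits:

#include <stdio.h>

int main(void)
{
    /* 1f 8b = magic, 08 = deflate, 08 = FNAME flag, 4 x 00 = mtime,
     * 00 = extra flags, 03 = OS (Unix), then the name "a" + NUL. */
    const unsigned char hdr[] = {
        0x1f, 0x8b, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 'a', 0x00
    };
    int flags = hdr[3];

    printf("magic ok    : %s\n", (hdr[0] == 0x1f && hdr[1] == 0x8b) ? "yes" : "no");
    printf("deflated    : %s\n", hdr[2] == 0x08 ? "yes" : "no");
    printf("extra field : %s\n", (flags & 0x04) ? "yes" : "no");  /* EXTRA_FIELD */
    printf("orig name   : %s\n", (flags & 0x08) ? "yes" : "no");  /* ORIG_NAME  */
    printf("comment     : %s\n", (flags & 0x10) ? "yes" : "no");  /* COMMENT    */
    printf("header crc  : %s\n", (flags & 0x02) ? "yes" : "no");  /* HEAD_CRC   */
    return 0;
}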
+
+/* ===========================================================================
+ Cleanup then free the given gz_stream. Return a zlib error code.
+ Try freeing in the reverse order of allocations.
+*/
+local int destroy (s)
+ gz_stream *s;
+{
+ int err = Z_OK;
+
+ if (!s) return Z_STREAM_ERROR;
+
+ TRYFREE(s->msg);
+
+ if (s->stream.state != NULL) {
+ if (s->mode == 'w') {
+#ifdef NO_DEFLATE
+ err = Z_STREAM_ERROR;
+#else
+ err = deflateEnd(&(s->stream));
+#endif
+ } else if (s->mode == 'r') {
+ err = inflateEnd(&(s->stream));
+ }
+ }
+ if (s->file != NULL && fclose(s->file)) {
+#ifdef ESPIPE
+ if (errno != ESPIPE) /* fclose is broken for pipes in HP/UX */
+#endif
+ err = Z_ERRNO;
+ }
+ if (s->z_err < 0) err = s->z_err;
+
+ TRYFREE(s->inbuf);
+ TRYFREE(s->outbuf);
+ TRYFREE(s->path);
+ TRYFREE(s);
+ return err;
+}
+
+/* ===========================================================================
+ Reads the given number of uncompressed bytes from the compressed file.
+ gzread returns the number of bytes actually read (0 for end of file).
+*/
+int ZEXPORT gzread (file, buf, len)
+ gzFile file;
+ voidp buf;
+ unsigned len;
+{
+ gz_stream *s = (gz_stream*)file;
+ Bytef *start = (Bytef*)buf; /* starting point for crc computation */
+ Byte *next_out; /* == stream.next_out but not forced far (for MSDOS) */
+
+ if (s == NULL || s->mode != 'r') return Z_STREAM_ERROR;
+
+ if (s->z_err == Z_DATA_ERROR || s->z_err == Z_ERRNO) return -1;
+ if (s->z_err == Z_STREAM_END) return 0; /* EOF */
+
+ next_out = (Byte*)buf;
+ s->stream.next_out = (Bytef*)buf;
+ s->stream.avail_out = len;
+
+ while (s->stream.avail_out != 0) {
+
+ if (s->transparent) {
+ /* Copy first the lookahead bytes: */
+ uInt n = s->stream.avail_in;
+ if (n > s->stream.avail_out) n = s->stream.avail_out;
+ if (n > 0) {
+ zmemcpy(s->stream.next_out, s->stream.next_in, n);
+ next_out += n;
+ s->stream.next_out = next_out;
+ s->stream.next_in += n;
+ s->stream.avail_out -= n;
+ s->stream.avail_in -= n;
+ }
+ if (s->stream.avail_out > 0) {
+ s->stream.avail_out -= fread(next_out, 1, s->stream.avail_out,
+ s->file);
+ }
+ len -= s->stream.avail_out;
+ s->stream.total_in += (uLong)len;
+ s->stream.total_out += (uLong)len;
+ if (len == 0) s->z_eof = 1;
+ return (int)len;
+ }
+ if (s->stream.avail_in == 0 && !s->z_eof) {
+
+ errno = 0;
+ s->stream.avail_in = fread(s->inbuf, 1, Z_BUFSIZE, s->file);
+ if (s->stream.avail_in == 0) {
+ s->z_eof = 1;
+ if (ferror(s->file)) {
+ s->z_err = Z_ERRNO;
+ break;
+ }
+ }
+ s->stream.next_in = s->inbuf;
+ }
+ s->z_err = inflate(&(s->stream), Z_NO_FLUSH);
+
+ if (s->z_err == Z_STREAM_END) {
+ /* Check CRC and original size */
+ s->crc = crc32(s->crc, start, (uInt)(s->stream.next_out - start));
+ start = s->stream.next_out;
+
+ if (getLong(s) != s->crc) {
+ s->z_err = Z_DATA_ERROR;
+ } else {
+ (void)getLong(s);
+ /* The uncompressed length returned by the above getLong() may
+ * differ from s->stream.total_out in case of
+ * concatenated .gz files. Check for such files:
+ */
+ check_header(s);
+ if (s->z_err == Z_OK) {
+ uLong total_in = s->stream.total_in;
+ uLong total_out = s->stream.total_out;
+
+ inflateReset(&(s->stream));
+ s->stream.total_in = total_in;
+ s->stream.total_out = total_out;
+ s->crc = crc32(0L, Z_NULL, 0);
+ }
+ }
+ }
+ if (s->z_err != Z_OK || s->z_eof) break;
+ }
+ s->crc = crc32(s->crc, start, (uInt)(s->stream.next_out - start));
+
+ return (int)(len - s->stream.avail_out);
+}
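
A typical consumer of gzread() simply loops until it returns 0 (end of file) or a negative error code. A minimal sketch (the input name is hypothetical; link with -lz):

#include <stdio.h>
#include <zlib.h>

int main(void)
{
    char buf[4096];
    int n;
    gzFile in = gzopen("input.gz", "rb");

    if (in == NULL) { perror("gzopen"); return 1; }
    /* gzread() returns the number of uncompressed bytes delivered,
     * 0 at end of file and a negative value on error. */
    while ((n = gzread(in, buf, (unsigned)sizeof(buf))) > 0)
        fwrite(buf, 1, (size_t)n, stdout);
    if (n < 0) fprintf(stderr, "gzread failed\n");
    gzclose(in);
    return 0;
}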
+
+
+/* ===========================================================================
+ Reads one byte from the compressed file. gzgetc returns this byte
+ or -1 in case of end of file or error.
+*/
+int ZEXPORT gzgetc(file)
+ gzFile file;
+{
+ unsigned char c;
+
+ return gzread(file, &c, 1) == 1 ? c : -1;
+}
+
+
+/* ===========================================================================
+ Reads bytes from the compressed file until len-1 characters are
+ read, or a newline character is read and transferred to buf, or an
+ end-of-file condition is encountered. The string is then terminated
+ with a null character.
+ gzgets returns buf, or Z_NULL in case of error.
+
+ The current implementation is not optimized at all.
+*/
+char * ZEXPORT gzgets(file, buf, len)
+ gzFile file;
+ char *buf;
+ int len;
+{
+ char *b = buf;
+ if (buf == Z_NULL || len <= 0) return Z_NULL;
+
+ while (--len > 0 && gzread(file, buf, 1) == 1 && *buf++ != '\n') ;
+ *buf = '\0';
+ return b == buf && len > 0 ? Z_NULL : b;
+}
+
+
+#ifndef NO_DEFLATE
+/* ===========================================================================
+ Writes the given number of uncompressed bytes into the compressed file.
+ gzwrite returns the number of bytes actually written (0 in case of error).
+*/
+int ZEXPORT gzwrite (file, buf, len)
+ gzFile file;
+ const voidp buf;
+ unsigned len;
+{
+ gz_stream *s = (gz_stream*)file;
+
+ if (s == NULL || s->mode != 'w') return Z_STREAM_ERROR;
+
+ s->stream.next_in = (Bytef*)buf;
+ s->stream.avail_in = len;
+
+ while (s->stream.avail_in != 0) {
+
+ if (s->stream.avail_out == 0) {
+
+ s->stream.next_out = s->outbuf;
+ if (fwrite(s->outbuf, 1, Z_BUFSIZE, s->file) != Z_BUFSIZE) {
+ s->z_err = Z_ERRNO;
+ break;
+ }
+ s->stream.avail_out = Z_BUFSIZE;
+ }
+ s->z_err = deflate(&(s->stream), Z_NO_FLUSH);
+ if (s->z_err != Z_OK) break;
+ }
+ s->crc = crc32(s->crc, (const Bytef *)buf, len);
+
+ return (int)(len - s->stream.avail_in);
+}
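
The producer side is symmetric: hand gzwrite() a plain buffer and let gzclose() emit the CRC-32/length trailer. A sketch with a hypothetical output name (link with -lz):

#include <string.h>
#include <zlib.h>

int main(void)
{
    static char msg[] = "one line of payload\n";
    gzFile out = gzopen("out.gz", "wb");

    if (out == NULL) return 1;
    /* gzwrite() returns the number of uncompressed bytes consumed, 0 on
     * error; gzclose() flushes and appends the crc/length trailer. */
    if (gzwrite(out, msg, (unsigned)strlen(msg)) == 0) {
        gzclose(out);
        return 1;
    }
    return gzclose(out) == Z_OK ? 0 : 1;
}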
+
+/* ===========================================================================
+ Converts, formats, and writes the args to the compressed file under
+ control of the format string, as in fprintf. gzprintf returns the number of
+ uncompressed bytes actually written (0 in case of error).
+*/
+#ifdef STDC
+#include <stdarg.h>
+
+int ZEXPORTVA gzprintf (gzFile file, const char *format, /* args */ ...)
+{
+ char buf[Z_PRINTF_BUFSIZE];
+ va_list va;
+ int len;
+
+ va_start(va, format);
+#ifdef HAS_vsnprintf
+ (void)vsnprintf(buf, sizeof(buf), format, va);
+#else
+ (void)vsprintf(buf, format, va);
+#endif
+ va_end(va);
+ len = strlen(buf); /* some *sprintf don't return the nb of bytes written */
+ if (len <= 0) return 0;
+
+ return gzwrite(file, buf, (unsigned)len);
+}
+#else /* not ANSI C */
+
+int ZEXPORTVA gzprintf (file, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10,
+ a11, a12, a13, a14, a15, a16, a17, a18, a19, a20)
+ gzFile file;
+ const char *format;
+ int a1, a2, a3, a4, a5, a6, a7, a8, a9, a10,
+ a11, a12, a13, a14, a15, a16, a17, a18, a19, a20;
+{
+ char buf[Z_PRINTF_BUFSIZE];
+ int len;
+
+#ifdef HAS_snprintf
+ snprintf(buf, sizeof(buf), format, a1, a2, a3, a4, a5, a6, a7, a8,
+ a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20);
+#else
+ sprintf(buf, format, a1, a2, a3, a4, a5, a6, a7, a8,
+ a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20);
+#endif
+ len = strlen(buf); /* old sprintf doesn't return the nb of bytes written */
+ if (len <= 0) return 0;
+
+ return gzwrite(file, buf, len);
+}
+#endif
+
+/* ===========================================================================
+ Writes c, converted to an unsigned char, into the compressed file.
+ gzputc returns the value that was written, or -1 in case of error.
+*/
+int ZEXPORT gzputc(file, c)
+ gzFile file;
+ int c;
+{
+ unsigned char cc = (unsigned char) c; /* required for big endian systems */
+
+ return gzwrite(file, &cc, 1) == 1 ? (int)cc : -1;
+}
+
+
+/* ===========================================================================
+ Writes the given null-terminated string to the compressed file, excluding
+ the terminating null character.
+ gzputs returns the number of characters written, or -1 in case of error.
+*/
+int ZEXPORT gzputs(file, s)
+ gzFile file;
+ const char *s;
+{
+ return gzwrite(file, (char*)s, (unsigned)strlen(s));
+}
+
+
+/* ===========================================================================
+ Flushes all pending output into the compressed file. The parameter
+ flush is as in the deflate() function.
+*/
+local int do_flush (file, flush)
+ gzFile file;
+ int flush;
+{
+ uInt len;
+ int done = 0;
+ gz_stream *s = (gz_stream*)file;
+
+ if (s == NULL || s->mode != 'w') return Z_STREAM_ERROR;
+
+ s->stream.avail_in = 0; /* should be zero already anyway */
+
+ for (;;) {
+ len = Z_BUFSIZE - s->stream.avail_out;
+
+ if (len != 0) {
+ if ((uInt)fwrite(s->outbuf, 1, len, s->file) != len) {
+ s->z_err = Z_ERRNO;
+ return Z_ERRNO;
+ }
+ s->stream.next_out = s->outbuf;
+ s->stream.avail_out = Z_BUFSIZE;
+ }
+ if (done) break;
+ s->z_err = deflate(&(s->stream), flush);
+
+ /* Ignore the second of two consecutive flushes: */
+ if (len == 0 && s->z_err == Z_BUF_ERROR) s->z_err = Z_OK;
+
+ /* deflate has finished flushing only when it hasn't used up
+ * all the available space in the output buffer:
+ */
+ done = (s->stream.avail_out != 0 || s->z_err == Z_STREAM_END);
+
+ if (s->z_err != Z_OK && s->z_err != Z_STREAM_END) break;
+ }
+ return s->z_err == Z_STREAM_END ? Z_OK : s->z_err;
+}
+
+int ZEXPORT gzflush (file, flush)
+ gzFile file;
+ int flush;
+{
+ gz_stream *s = (gz_stream*)file;
+ int err = do_flush (file, flush);
+
+ if (err) return err;
+ fflush(s->file);
+ return s->z_err == Z_STREAM_END ? Z_OK : s->z_err;
+}
+#endif /* NO_DEFLATE */
+
+/* ===========================================================================
+ Sets the starting position for the next gzread or gzwrite on the given
+ compressed file. The offset represents a number of bytes in the
+ gzseek returns the resulting offset location as measured in bytes from
+ the beginning of the uncompressed stream, or -1 in case of error.
+ SEEK_END is not implemented, returns error.
+ In this version of the library, gzseek can be extremely slow.
+*/
+z_off_t ZEXPORT gzseek (file, offset, whence)
+ gzFile file;
+ z_off_t offset;
+ int whence;
+{
+ gz_stream *s = (gz_stream*)file;
+
+ if (s == NULL || whence == SEEK_END ||
+ s->z_err == Z_ERRNO || s->z_err == Z_DATA_ERROR) {
+ return -1L;
+ }
+
+ if (s->mode == 'w') {
+#ifdef NO_DEFLATE
+ return -1L;
+#else
+ if (whence == SEEK_SET) {
+ offset -= s->stream.total_in;
+ }
+ if (offset < 0) return -1L;
+
+ /* At this point, offset is the number of zero bytes to write. */
+ if (s->inbuf == Z_NULL) {
+ s->inbuf = (Byte*)ALLOC(Z_BUFSIZE); /* for seeking */
+ zmemzero(s->inbuf, Z_BUFSIZE);
+ }
+ while (offset > 0) {
+ uInt size = Z_BUFSIZE;
+ if (offset < Z_BUFSIZE) size = (uInt)offset;
+
+ size = gzwrite(file, s->inbuf, size);
+ if (size == 0) return -1L;
+
+ offset -= size;
+ }
+ return (z_off_t)s->stream.total_in;
+#endif
+ }
+ /* Rest of function is for reading only */
+
+ /* compute absolute position */
+ if (whence == SEEK_CUR) {
+ offset += s->stream.total_out;
+ }
+ if (offset < 0) return -1L;
+
+ if (s->transparent) {
+ /* map to fseek */
+ s->stream.avail_in = 0;
+ s->stream.next_in = s->inbuf;
+ if (fseek(s->file, offset, SEEK_SET) < 0) return -1L;
+
+ s->stream.total_in = s->stream.total_out = (uLong)offset;
+ return offset;
+ }
+
+ /* For a negative seek, rewind and use positive seek */
+ if ((uLong)offset >= s->stream.total_out) {
+ offset -= s->stream.total_out;
+ } else if (gzrewind(file) < 0) {
+ return -1L;
+ }
+ /* offset is now the number of bytes to skip. */
+
+ if (offset != 0 && s->outbuf == Z_NULL) {
+ s->outbuf = (Byte*)ALLOC(Z_BUFSIZE);
+ }
+ while (offset > 0) {
+ int size = Z_BUFSIZE;
+ if (offset < Z_BUFSIZE) size = (int)offset;
+
+ size = gzread(file, s->outbuf, (uInt)size);
+ if (size <= 0) return -1L;
+ offset -= size;
+ }
+ return (z_off_t)s->stream.total_out;
+}
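
On a read stream gzseek() is emulated by decompressing and discarding, which is why the comment above warns that it can be slow; it is still convenient for skipping a known prefix. A sketch (data.gz is a placeholder name; link with -lz):

#include <stdio.h>
#include <zlib.h>

int main(void)
{
    char buf[128];
    int n;
    gzFile in = gzopen("data.gz", "rb");

    if (in == NULL) return 1;
    /* Skip the first 100 uncompressed bytes, then report the position. */
    if (gzseek(in, 100L, SEEK_SET) < 0) { gzclose(in); return 1; }
    printf("now at uncompressed offset %ld\n", (long)gztell(in));
    n = gzread(in, buf, (unsigned)sizeof(buf));
    printf("read %d more bytes\n", n);
    gzclose(in);
    return 0;
}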
+
+/* ===========================================================================
+ Rewinds input file.
+*/
+int ZEXPORT gzrewind (file)
+ gzFile file;
+{
+ gz_stream *s = (gz_stream*)file;
+
+ if (s == NULL || s->mode != 'r') return -1;
+
+ s->z_err = Z_OK;
+ s->z_eof = 0;
+ s->stream.avail_in = 0;
+ s->stream.next_in = s->inbuf;
+ s->crc = crc32(0L, Z_NULL, 0);
+
+ if (s->startpos == 0) { /* not a compressed file */
+ rewind(s->file);
+ return 0;
+ }
+
+ (void) inflateReset(&s->stream);
+ return fseek(s->file, s->startpos, SEEK_SET);
+}
+
+/* ===========================================================================
+ Returns the starting position for the next gzread or gzwrite on the
+ given compressed file. This position represents a number of bytes in the
+ uncompressed data stream.
+*/
+z_off_t ZEXPORT gztell (file)
+ gzFile file;
+{
+ return gzseek(file, 0L, SEEK_CUR);
+}
+
+/* ===========================================================================
+ Returns 1 when EOF has previously been detected reading the given
+ input stream, otherwise zero.
+*/
+int ZEXPORT gzeof (file)
+ gzFile file;
+{
+ gz_stream *s = (gz_stream*)file;
+
+ return (s == NULL || s->mode != 'r') ? 0 : s->z_eof;
+}
+
+/* ===========================================================================
+ Outputs a long in LSB order to the given file
+*/
+local void putLong (file, x)
+ FILE *file;
+ uLong x;
+{
+ int n;
+ for (n = 0; n < 4; n++) {
+ fputc((int)(x & 0xff), file);
+ x >>= 8;
+ }
+}
+
+/* ===========================================================================
+ Reads a long in LSB order from the given gz_stream. Sets z_err in case
+ of error.
+*/
+local uLong getLong (s)
+ gz_stream *s;
+{
+ uLong x = (uLong)get_byte(s);
+ int c;
+
+ x += ((uLong)get_byte(s))<<8;
+ x += ((uLong)get_byte(s))<<16;
+ c = get_byte(s);
+ if (c == EOF) s->z_err = Z_DATA_ERROR;
+ x += ((uLong)c)<<24;
+ return x;
+}
+
+/* ===========================================================================
+ Flushes all pending output if necessary, closes the compressed file
+ and deallocates all the (de)compression state.
+*/
+int ZEXPORT gzclose (file)
+ gzFile file;
+{
+ int err;
+ gz_stream *s = (gz_stream*)file;
+
+ if (s == NULL) return Z_STREAM_ERROR;
+
+ if (s->mode == 'w') {
+#ifdef NO_DEFLATE
+ return Z_STREAM_ERROR;
+#else
+ err = do_flush (file, Z_FINISH);
+ if (err != Z_OK) return destroy((gz_stream*)file);
+
+ putLong (s->file, s->crc);
+ putLong (s->file, s->stream.total_in);
+#endif
+ }
+ return destroy((gz_stream*)file);
+}
+
+/* ===========================================================================
+ Returns the error message for the last error which occurred on the
+ given compressed file. errnum is set to the zlib error number. If an
+ error occurred in the file system and not in the compression library,
+ errnum is set to Z_ERRNO and the application may consult errno
+ to get the exact error code.
+*/
+const char* ZEXPORT gzerror (file, errnum)
+ gzFile file;
+ int *errnum;
+{
+ char *m;
+ gz_stream *s = (gz_stream*)file;
+
+ if (s == NULL) {
+ *errnum = Z_STREAM_ERROR;
+ return (const char*)ERR_MSG(Z_STREAM_ERROR);
+ }
+ *errnum = s->z_err;
+ if (*errnum == Z_OK) return (const char*)"";
+
+ m = (char*)(*errnum == Z_ERRNO ? zstrerror(errno) : s->stream.msg);
+
+ if (m == NULL || *m == '\0') m = (char*)ERR_MSG(s->z_err);
+
+ TRYFREE(s->msg);
+ s->msg = (char*)ALLOC(strlen(s->path) + strlen(m) + 3);
+ strcpy(s->msg, s->path);
+ strcat(s->msg, ": ");
+ strcat(s->msg, m);
+ return (const char*)s->msg;
+}
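
After any of the calls above fail, gzerror() distinguishes file-system trouble (Z_ERRNO, consult errno) from compression errors. A short error-reporting sketch, separate from the committed sources (hypothetical file name; link with -lz):

#include <stdio.h>
#include <zlib.h>

/* Print the most useful text for a failed operation on f. */
static void report(gzFile f, const char *what)
{
    int errnum;
    const char *msg = gzerror(f, &errnum);

    if (errnum == Z_ERRNO)
        perror(what);                      /* file-system error, see errno */
    else
        fprintf(stderr, "%s: %s (zlib error %d)\n", what, msg, errnum);
}

int main(void)
{
    char buf[256];
    gzFile in = gzopen("maybe-corrupt.gz", "rb");

    if (in == NULL) { perror("gzopen"); return 1; }
    if (gzread(in, buf, (unsigned)sizeof(buf)) < 0)
        report(in, "gzread");
    gzclose(in);
    return 0;
}
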
Added: trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/deflate.h
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/deflate.h (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/deflate.h Sat Jul 13 19:22:41 2002
@@ -0,0 +1,318 @@
+/* deflate.h -- internal compression state
+ * Copyright (C) 1995-1998 Jean-loup Gailly
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* @(#) $Id$ */
+
+#ifndef _DEFLATE_H
+#define _DEFLATE_H
+
+#include "zutil.h"
+
+/* ===========================================================================
+ * Internal compression state.
+ */
+
+#define LENGTH_CODES 29
+/* number of length codes, not counting the special END_BLOCK code */
+
+#define LITERALS 256
+/* number of literal bytes 0..255 */
+
+#define L_CODES (LITERALS+1+LENGTH_CODES)
+/* number of Literal or Length codes, including the END_BLOCK code */
+
+#define D_CODES 30
+/* number of distance codes */
+
+#define BL_CODES 19
+/* number of codes used to transfer the bit lengths */
+
+#define HEAP_SIZE (2*L_CODES+1)
+/* maximum heap size */
+
+#define MAX_BITS 15
+/* All codes must not exceed MAX_BITS bits */
+
+#define INIT_STATE 42
+#define BUSY_STATE 113
+#define FINISH_STATE 666
+/* Stream status */
+
+
+/* Data structure describing a single value and its code string. */
+typedef struct ct_data_s {
+ union {
+ ush freq; /* frequency count */
+ ush code; /* bit string */
+ } fc;
+ union {
+ ush dad; /* father node in Huffman tree */
+ ush len; /* length of bit string */
+ } dl;
+} FAR ct_data;
+
+#define Freq fc.freq
+#define Code fc.code
+#define Dad dl.dad
+#define Len dl.len
+
+typedef struct static_tree_desc_s static_tree_desc;
+
+typedef struct tree_desc_s {
+ ct_data *dyn_tree; /* the dynamic tree */
+ int max_code; /* largest code with non zero frequency */
+ static_tree_desc *stat_desc; /* the corresponding static tree */
+} FAR tree_desc;
+
+typedef ush Pos;
+typedef Pos FAR Posf;
+typedef unsigned IPos;
+
+/* A Pos is an index in the character window. We use short instead of int to
+ * save space in the various tables. IPos is used only for parameter passing.
+ */
+
+typedef struct internal_state {
+ z_streamp strm; /* pointer back to this zlib stream */
+ int status; /* as the name implies */
+ Bytef *pending_buf; /* output still pending */
+ ulg pending_buf_size; /* size of pending_buf */
+ Bytef *pending_out; /* next pending byte to output to the stream */
+ int pending; /* nb of bytes in the pending buffer */
+ int noheader; /* suppress zlib header and adler32 */
+ Byte data_type; /* UNKNOWN, BINARY or ASCII */
+ Byte method; /* STORED (for zip only) or DEFLATED */
+ int last_flush; /* value of flush param for previous deflate call */
+
+ /* used by deflate.c: */
+
+ uInt w_size; /* LZ77 window size (32K by default) */
+ uInt w_bits; /* log2(w_size) (8..16) */
+ uInt w_mask; /* w_size - 1 */
+
+ Bytef *window;
+ /* Sliding window. Input bytes are read into the second half of the window,
+ * and move to the first half later to keep a dictionary of at least wSize
+ * bytes. With this organization, matches are limited to a distance of
+ * wSize-MAX_MATCH bytes, but this ensures that IO is always
+ * performed with a length multiple of the block size. Also, it limits
+ * the window size to 64K, which is quite useful on MSDOS.
+ * To do: use the user input buffer as sliding window.
+ */
+
+ ulg window_size;
+ /* Actual size of window: 2*wSize, except when the user input buffer
+ * is directly used as sliding window.
+ */
+
+ Posf *prev;
+ /* Link to older string with same hash index. To limit the size of this
+ * array to 64K, this link is maintained only for the last 32K strings.
+ * An index in this array is thus a window index modulo 32K.
+ */
+
+ Posf *head; /* Heads of the hash chains or NIL. */
+
+ uInt ins_h; /* hash index of string to be inserted */
+ uInt hash_size; /* number of elements in hash table */
+ uInt hash_bits; /* log2(hash_size) */
+ uInt hash_mask; /* hash_size-1 */
+
+ uInt hash_shift;
+ /* Number of bits by which ins_h must be shifted at each input
+ * step. It must be such that after MIN_MATCH steps, the oldest
+ * byte no longer takes part in the hash key, that is:
+ * hash_shift * MIN_MATCH >= hash_bits
+ */
+
+ long block_start;
+ /* Window position at the beginning of the current output block. Gets
+ * negative when the window is moved backwards.
+ */
+
+ uInt match_length; /* length of best match */
+ IPos prev_match; /* previous match */
+ int match_available; /* set if previous match exists */
+ uInt strstart; /* start of string to insert */
+ uInt match_start; /* start of matching string */
+ uInt lookahead; /* number of valid bytes ahead in window */
+
+ uInt prev_length;
+ /* Length of the best match at previous step. Matches not greater than this
+ * are discarded. This is used in the lazy match evaluation.
+ */
+
+ uInt max_chain_length;
+ /* To speed up deflation, hash chains are never searched beyond this
+ * length. A higher limit improves compression ratio but degrades the
+ * speed.
+ */
+
+ uInt max_lazy_match;
+ /* Attempt to find a better match only when the current match is strictly
+ * smaller than this value. This mechanism is used only for compression
+ * levels >= 4.
+ */
+# define max_insert_length max_lazy_match
+ /* Insert new strings in the hash table only if the match length is not
+ * greater than this length. This saves time but degrades compression.
+ * max_insert_length is used only for compression levels <= 3.
+ */
+
+ int level; /* compression level (1..9) */
+ int strategy; /* favor or force Huffman coding*/
+
+ uInt good_match;
+ /* Use a faster search when the previous match is longer than this */
+
+ int nice_match; /* Stop searching when current match exceeds this */
+
+ /* used by trees.c: */
+ /* Didn't use ct_data typedef below to suppress compiler warning */
+ struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */
+ struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */
+ struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */
+
+ struct tree_desc_s l_desc; /* desc. for literal tree */
+ struct tree_desc_s d_desc; /* desc. for distance tree */
+ struct tree_desc_s bl_desc; /* desc. for bit length tree */
+
+ ush bl_count[MAX_BITS+1];
+ /* number of codes at each bit length for an optimal tree */
+
+ int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */
+ int heap_len; /* number of elements in the heap */
+ int heap_max; /* element of largest frequency */
+ /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
+ * The same heap array is used to build all trees.
+ */
+
+ uch depth[2*L_CODES+1];
+ /* Depth of each subtree used as tie breaker for trees of equal frequency
+ */
+
+ uchf *l_buf; /* buffer for literals or lengths */
+
+ uInt lit_bufsize;
+ /* Size of match buffer for literals/lengths. There are 4 reasons for
+ * limiting lit_bufsize to 64K:
+ * - frequencies can be kept in 16 bit counters
+ * - if compression is not successful for the first block, all input
+ * data is still in the window so we can still emit a stored block even
+ * when input comes from standard input. (This can also be done for
+ * all blocks if lit_bufsize is not greater than 32K.)
+ * - if compression is not successful for a file smaller than 64K, we can
+ * even emit a stored file instead of a stored block (saving 5 bytes).
+ * This is applicable only for zip (not gzip or zlib).
+ * - creating new Huffman trees less frequently may not provide fast
+ * adaptation to changes in the input data statistics. (Take for
+ * example a binary file with poorly compressible code followed by
+ * a highly compressible string table.) Smaller buffer sizes give
+ * fast adaptation but have of course the overhead of transmitting
+ * trees more frequently.
+ * - I can't count above 4
+ */
+
+ uInt last_lit; /* running index in l_buf */
+
+ ushf *d_buf;
+ /* Buffer for distances. To simplify the code, d_buf and l_buf have
+ * the same number of elements. To use different lengths, an extra flag
+ * array would be necessary.
+ */
+
+ ulg opt_len; /* bit length of current block with optimal trees */
+ ulg static_len; /* bit length of current block with static trees */
+ uInt matches; /* number of string matches in current block */
+ int last_eob_len; /* bit length of EOB code for last block */
+
+#ifdef DEBUG
+ ulg compressed_len; /* total bit length of compressed file mod 2^32 */
+ ulg bits_sent; /* bit length of compressed data sent mod 2^32 */
+#endif
+
+ ush bi_buf;
+ /* Output buffer. bits are inserted starting at the bottom (least
+ * significant bits).
+ */
+ int bi_valid;
+ /* Number of valid bits in bi_buf. All bits above the last valid bit
+ * are always zero.
+ */
+
+} FAR deflate_state;
+
+/* Output a byte on the stream.
+ * IN assertion: there is enough room in pending_buf.
+ */
+#define put_byte(s, c) {s->pending_buf[s->pending++] = (c);}
+
+
+#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
+/* Minimum amount of lookahead, except at the end of the input file.
+ * See deflate.c for comments about the MIN_MATCH+1.
+ */
+
+#define MAX_DIST(s) ((s)->w_size-MIN_LOOKAHEAD)
+/* In order to simplify the code, particularly on 16 bit machines, match
+ * distances are limited to MAX_DIST instead of WSIZE.
+ */
+
+ /* in trees.c */
+void _tr_init OF((deflate_state *s));
+int _tr_tally OF((deflate_state *s, unsigned dist, unsigned lc));
+void _tr_flush_block OF((deflate_state *s, charf *buf, ulg stored_len,
+ int eof));
+void _tr_align OF((deflate_state *s));
+void _tr_stored_block OF((deflate_state *s, charf *buf, ulg stored_len,
+ int eof));
+
+#define d_code(dist) \
+ ((dist) < 256 ? _dist_code[dist] : _dist_code[256+((dist)>>7)])
+/* Mapping from a distance to a distance code. dist is the distance - 1 and
+ * must not have side effects. _dist_code[256] and _dist_code[257] are never
+ * used.
+ */
+
+#ifndef DEBUG
+/* Inline versions of _tr_tally for speed: */
+
+#if defined(GEN_TREES_H) || !defined(STDC)
+ extern uch _length_code[];
+ extern uch _dist_code[];
+#else
+ extern const uch _length_code[];
+ extern const uch _dist_code[];
+#endif
+
+# define _tr_tally_lit(s, c, flush) \
+ { uch cc = (c); \
+ s->d_buf[s->last_lit] = 0; \
+ s->l_buf[s->last_lit++] = cc; \
+ s->dyn_ltree[cc].Freq++; \
+ flush = (s->last_lit == s->lit_bufsize-1); \
+ }
+# define _tr_tally_dist(s, distance, length, flush) \
+ { uch len = (length); \
+ ush dist = (distance); \
+ s->d_buf[s->last_lit] = dist; \
+ s->l_buf[s->last_lit++] = len; \
+ dist--; \
+ s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \
+ s->dyn_dtree[d_code(dist)].Freq++; \
+ flush = (s->last_lit == s->lit_bufsize-1); \
+ }
+#else
+# define _tr_tally_lit(s, c, flush) flush = _tr_tally(s, 0, c)
+# define _tr_tally_dist(s, distance, length, flush) \
+ flush = _tr_tally(s, distance, length)
+#endif
+
+#endif
Added: trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/infutil.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/infutil.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/infutil.c Sat Jul 13 19:22:41 2002
@@ -0,0 +1,87 @@
+/* inflate_util.c -- data and routines common to blocks and codes
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include "zutil.h"
+#include "infblock.h"
+#include "inftrees.h"
+#include "infcodes.h"
+#include "infutil.h"
+
+struct inflate_codes_state {int dummy;}; /* for buggy compilers */
+
+/* And'ing with mask[n] masks the lower n bits */
+uInt inflate_mask[17] = {
+ 0x0000,
+ 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
+ 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
+};
+
+
+/* copy as much as possible from the sliding window to the output area */
+int inflate_flush(s, z, r)
+inflate_blocks_statef *s;
+z_streamp z;
+int r;
+{
+ uInt n;
+ Bytef *p;
+ Bytef *q;
+
+ /* local copies of source and destination pointers */
+ p = z->next_out;
+ q = s->read;
+
+ /* compute number of bytes to copy as far as end of window */
+ n = (uInt)((q <= s->write ? s->write : s->end) - q);
+ if (n > z->avail_out) n = z->avail_out;
+ if (n && r == Z_BUF_ERROR) r = Z_OK;
+
+ /* update counters */
+ z->avail_out -= n;
+ z->total_out += n;
+
+ /* update check information */
+ if (s->checkfn != Z_NULL)
+ z->adler = s->check = (*s->checkfn)(s->check, q, n);
+
+ /* copy as far as end of window */
+ zmemcpy(p, q, n);
+ p += n;
+ q += n;
+
+ /* see if more to copy at beginning of window */
+ if (q == s->end)
+ {
+ /* wrap pointers */
+ q = s->window;
+ if (s->write == s->end)
+ s->write = s->window;
+
+ /* compute bytes to copy */
+ n = (uInt)(s->write - q);
+ if (n > z->avail_out) n = z->avail_out;
+ if (n && r == Z_BUF_ERROR) r = Z_OK;
+
+ /* update counters */
+ z->avail_out -= n;
+ z->total_out += n;
+
+ /* update check information */
+ if (s->checkfn != Z_NULL)
+ z->adler = s->check = (*s->checkfn)(s->check, q, n);
+
+ /* copy */
+ zmemcpy(p, q, n);
+ p += n;
+ q += n;
+ }
+
+ /* update pointers */
+ z->next_out = p;
+ s->read = q;
+
+ /* done */
+ return r;
+}
Added: trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/crc32.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/crc32.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/crc32.c Sat Jul 13 19:22:41 2002
@@ -0,0 +1,162 @@
+/* crc32.c -- compute the CRC-32 of a data stream
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* @(#) $Id$ */
+
+#include "zlib.h"
+
+#define local static
+
+#ifdef DYNAMIC_CRC_TABLE
+
+local int crc_table_empty = 1;
+local uLongf crc_table[256];
+local void make_crc_table OF((void));
+
+/*
+ Generate a table for a byte-wise 32-bit CRC calculation on the polynomial:
+ x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x+1.
+
+ Polynomials over GF(2) are represented in binary, one bit per coefficient,
+ with the lowest powers in the most significant bit. Then adding polynomials
+ is just exclusive-or, and multiplying a polynomial by x is a right shift by
+ one. If we call the above polynomial p, and represent a byte as the
+ polynomial q, also with the lowest power in the most significant bit (so the
+ byte 0xb1 is the polynomial x^7+x^3+x+1), then the CRC is (q*x^32) mod p,
+ where a mod b means the remainder after dividing a by b.
+
+ This calculation is done using the shift-register method of multiplying and
+ taking the remainder. The register is initialized to zero, and for each
+ incoming bit, x^32 is added mod p to the register if the bit is a one (where
+ x^32 mod p is p+x^32 = x^26+...+1), and the register is multiplied mod p by
+ x (which is shifting right by one and adding x^32 mod p if the bit shifted
+ out is a one). We start with the highest power (least significant bit) of
+ q and repeat for all eight bits of q.
+
+ The table is simply the CRC of all possible eight bit values. This is all
+ the information needed to generate CRC's on data a byte at a time for all
+ combinations of CRC register values and incoming bytes.
+*/
+local void make_crc_table()
+{
+ uLong c;
+ int n, k;
+ uLong poly; /* polynomial exclusive-or pattern */
+ /* terms of polynomial defining this crc (except x^32): */
+ static const Byte p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26};
+
+ /* make exclusive-or pattern from polynomial (0xedb88320L) */
+ poly = 0L;
+ for (n = 0; n < sizeof(p)/sizeof(Byte); n++)
+ poly |= 1L << (31 - p[n]);
+
+ for (n = 0; n < 256; n++)
+ {
+ c = (uLong)n;
+ for (k = 0; k < 8; k++)
+ c = c & 1 ? poly ^ (c >> 1) : c >> 1;
+ crc_table[n] = c;
+ }
+ crc_table_empty = 0;
+}
+#else
+/* ========================================================================
+ * Table of CRC-32's of all single-byte values (made by make_crc_table)
+ */
+local const uLongf crc_table[256] = {
+ 0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L,
+ 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L,
+ 0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
+ 0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
+ 0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
+ 0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
+ 0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
+ 0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
+ 0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
+ 0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
+ 0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
+ 0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
+ 0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
+ 0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL,
+ 0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL,
+ 0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L,
+ 0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
+ 0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
+ 0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
+ 0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
+ 0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
+ 0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
+ 0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
+ 0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
+ 0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
+ 0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
+ 0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L,
+ 0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L,
+ 0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L,
+ 0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
+ 0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
+ 0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
+ 0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
+ 0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
+ 0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
+ 0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
+ 0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
+ 0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
+ 0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
+ 0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L,
+ 0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL,
+ 0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L,
+ 0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
+ 0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
+ 0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
+ 0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
+ 0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
+ 0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
+ 0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
+ 0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
+ 0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
+ 0x2d02ef8dL
+};
+#endif
+
+/* =========================================================================
+ * This function can be used by asm versions of crc32()
+ */
+const uLongf * ZEXPORT get_crc_table()
+{
+#ifdef DYNAMIC_CRC_TABLE
+ if (crc_table_empty) make_crc_table();
+#endif
+ return (const uLongf *)crc_table;
+}
+
+/* ========================================================================= */
+#define DO1(buf) crc = crc_table[((int)crc ^ (*buf++)) & 0xff] ^ (crc >> 8);
+#define DO2(buf) DO1(buf); DO1(buf);
+#define DO4(buf) DO2(buf); DO2(buf);
+#define DO8(buf) DO4(buf); DO4(buf);
+
+/* ========================================================================= */
+uLong ZEXPORT crc32(crc, buf, len)
+ uLong crc;
+ const Bytef *buf;
+ uInt len;
+{
+ if (buf == Z_NULL) return 0L;
+#ifdef DYNAMIC_CRC_TABLE
+ if (crc_table_empty)
+ make_crc_table();
+#endif
+ crc = crc ^ 0xffffffffL;
+ while (len >= 8)
+ {
+ DO8(buf);
+ len -= 8;
+ }
+ if (len) do {
+ DO1(buf);
+ } while (--len);
+ return crc ^ 0xffffffffL;
+}
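For reference, the crc32() entry point added above follows the usual zlib
calling convention: seed the running value by passing Z_NULL once, then feed
the data in as many pieces as convenient.  A minimal sketch (the buffers and
main() here are illustrative only, not part of this commit):

    /* crc32 usage sketch -- links against the zlib built from this tree */
    #include <stdio.h>
    #include "zlib.h"

    int main(void)
    {
        const Bytef part1[] = "hello, ";
        const Bytef part2[] = "world";
        uLong crc;

        /* crc32() returns 0L when buf is Z_NULL (see the early return
         * above), which is the required initial value. */
        crc = crc32(0L, Z_NULL, 0);

        /* The running value is then updated incrementally. */
        crc = crc32(crc, part1, sizeof(part1) - 1);
        crc = crc32(crc, part2, sizeof(part2) - 1);

        printf("crc32 = 0x%08lx\n", crc);
        return 0;
    }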
Added: trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/zutil.h
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/zutil.h (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/zutil.h Sat Jul 13 19:22:41 2002
@@ -0,0 +1,220 @@
+/* zutil.h -- internal interface and configuration of the compression library
+ * Copyright (C) 1995-1998 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* @(#) $Id$ */
+
+#ifndef _Z_UTIL_H
+#define _Z_UTIL_H
+
+#include "zlib.h"
+
+#ifdef STDC
+# include <stddef.h>
+# include <string.h>
+# include <stdlib.h>
+#endif
+#ifdef NO_ERRNO_H
+ extern int errno;
+#else
+# include <errno.h>
+#endif
+
+#ifndef local
+# define local static
+#endif
+/* compile with -Dlocal if your debugger can't find static symbols */
+
+typedef unsigned char uch;
+typedef uch FAR uchf;
+typedef unsigned short ush;
+typedef ush FAR ushf;
+typedef unsigned long ulg;
+
+extern const char *z_errmsg[10]; /* indexed by 2-zlib_error */
+/* (size given to avoid silly warnings with Visual C++) */
+
+#define ERR_MSG(err) z_errmsg[Z_NEED_DICT-(err)]
+
+#define ERR_RETURN(strm,err) \
+ return (strm->msg = (char*)ERR_MSG(err), (err))
+/* To be used only when the state is known to be valid */
+
+ /* common constants */
+
+#ifndef DEF_WBITS
+# define DEF_WBITS MAX_WBITS
+#endif
+/* default windowBits for decompression. MAX_WBITS is for compression only */
+
+#if MAX_MEM_LEVEL >= 8
+# define DEF_MEM_LEVEL 8
+#else
+# define DEF_MEM_LEVEL MAX_MEM_LEVEL
+#endif
+/* default memLevel */
+
+#define STORED_BLOCK 0
+#define STATIC_TREES 1
+#define DYN_TREES 2
+/* The three kinds of block type */
+
+#define MIN_MATCH 3
+#define MAX_MATCH 258
+/* The minimum and maximum match lengths */
+
+#define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */
+
+ /* target dependencies */
+
+#ifdef MSDOS
+# define OS_CODE 0x00
+# if defined(__TURBOC__) || defined(__BORLANDC__)
+# if(__STDC__ == 1) && (defined(__LARGE__) || defined(__COMPACT__))
+ /* Allow compilation with ANSI keywords only enabled */
+ void _Cdecl farfree( void *block );
+ void *_Cdecl farmalloc( unsigned long nbytes );
+# else
+# include <alloc.h>
+# endif
+# else /* MSC or DJGPP */
+# include <malloc.h>
+# endif
+#endif
+
+#ifdef OS2
+# define OS_CODE 0x06
+#endif
+
+#ifdef WIN32 /* Windows 95 & Windows NT */
+# define OS_CODE 0x0b
+#endif
+
+#if defined(VAXC) || defined(VMS)
+# define OS_CODE 0x02
+# define F_OPEN(name, mode) \
+ fopen((name), (mode), "mbc=60", "ctx=stm", "rfm=fix", "mrs=512")
+#endif
+
+#ifdef AMIGA
+# define OS_CODE 0x01
+#endif
+
+#if defined(ATARI) || defined(atarist)
+# define OS_CODE 0x05
+#endif
+
+#if defined(MACOS) || defined(TARGET_OS_MAC)
+# define OS_CODE 0x07
+# if defined(__MWERKS__) && __dest_os != __be_os && __dest_os != __win32_os
+# include <unix.h> /* for fdopen */
+# else
+# ifndef fdopen
+# define fdopen(fd,mode) NULL /* No fdopen() */
+# endif
+# endif
+#endif
+
+#ifdef __50SERIES /* Prime/PRIMOS */
+# define OS_CODE 0x0F
+#endif
+
+#ifdef TOPS20
+# define OS_CODE 0x0a
+#endif
+
+#if defined(_BEOS_) || defined(RISCOS)
+# define fdopen(fd,mode) NULL /* No fdopen() */
+#endif
+
+#if (defined(_MSC_VER) && (_MSC_VER > 600))
+# define fdopen(fd,type) _fdopen(fd,type)
+#endif
+
+
+ /* Common defaults */
+
+#ifndef OS_CODE
+# define OS_CODE 0x03 /* assume Unix */
+#endif
+
+#ifndef F_OPEN
+# define F_OPEN(name, mode) fopen((name), (mode))
+#endif
+
+ /* functions */
+
+#ifdef HAVE_STRERROR
+ extern char *strerror OF((int));
+# define zstrerror(errnum) strerror(errnum)
+#else
+# define zstrerror(errnum) ""
+#endif
+
+#if defined(pyr)
+# define NO_MEMCPY
+#endif
+#if defined(SMALL_MEDIUM) && !defined(_MSC_VER) && !defined(__SC__)
+ /* Use our own functions for small and medium model with MSC <= 5.0.
+ * You may have to use the same strategy for Borland C (untested).
+ * The __SC__ check is for Symantec.
+ */
+# define NO_MEMCPY
+#endif
+#if defined(STDC) && !defined(HAVE_MEMCPY) && !defined(NO_MEMCPY)
+# define HAVE_MEMCPY
+#endif
+#ifdef HAVE_MEMCPY
+# ifdef SMALL_MEDIUM /* MSDOS small or medium model */
+# define zmemcpy _fmemcpy
+# define zmemcmp _fmemcmp
+# define zmemzero(dest, len) _fmemset(dest, 0, len)
+# else
+# define zmemcpy memcpy
+# define zmemcmp memcmp
+# define zmemzero(dest, len) memset(dest, 0, len)
+# endif
+#else
+ extern void zmemcpy OF((Bytef* dest, const Bytef* source, uInt len));
+ extern int zmemcmp OF((const Bytef* s1, const Bytef* s2, uInt len));
+ extern void zmemzero OF((Bytef* dest, uInt len));
+#endif
+
+/* Diagnostic functions */
+#ifdef DEBUG
+# include <stdio.h>
+ extern int z_verbose;
+ extern void z_error OF((char *m));
+# define Assert(cond,msg) {if(!(cond)) z_error(msg);}
+# define Trace(x) {if (z_verbose>=0) fprintf x ;}
+# define Tracev(x) {if (z_verbose>0) fprintf x ;}
+# define Tracevv(x) {if (z_verbose>1) fprintf x ;}
+# define Tracec(c,x) {if (z_verbose>0 && (c)) fprintf x ;}
+# define Tracecv(c,x) {if (z_verbose>1 && (c)) fprintf x ;}
+#else
+# define Assert(cond,msg)
+# define Trace(x)
+# define Tracev(x)
+# define Tracevv(x)
+# define Tracec(c,x)
+# define Tracecv(c,x)
+#endif
+
+
+typedef uLong (ZEXPORT *check_func) OF((uLong check, const Bytef *buf,
+ uInt len));
+voidpf zcalloc OF((voidpf opaque, unsigned items, unsigned size));
+void zcfree OF((voidpf opaque, voidpf ptr));
+
+#define ZALLOC(strm, items, size) \
+ (*((strm)->zalloc))((strm)->opaque, (items), (size))
+#define ZFREE(strm, addr) (*((strm)->zfree))((strm)->opaque, (voidpf)(addr))
+#define TRY_FREE(s, p) {if (p) ZFREE(s, p);}
+
+#endif /* _Z_UTIL_H */
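The ZALLOC/ZFREE macros defined above dispatch through the zalloc/zfree hooks
stored in the z_stream, and the library falls back to the zcalloc/zcfree
defaults declared here when the application leaves those hooks Z_NULL.  A
sketch of how a custom allocator would be wired in (my_alloc/my_free are
invented names for the example; the hook signatures are the alloc_func and
free_func types published in zlib.h):

    /* custom allocator sketch -- my_alloc/my_free are hypothetical names */
    #include <stdio.h>
    #include <stdlib.h>
    #include "zlib.h"

    static voidpf my_alloc(voidpf opaque, uInt items, uInt size)
    {
        (void)opaque;                /* per-stream context, unused here   */
        return calloc(items, size);  /* zero-filled, as zcalloc is on ANSI
                                      * builds                            */
    }

    static void my_free(voidpf opaque, voidpf address)
    {
        (void)opaque;
        free(address);
    }

    int main(void)
    {
        z_stream strm;

        strm.zalloc = my_alloc;   /* ZALLOC(&strm, ...) now calls my_alloc */
        strm.zfree  = my_free;    /* ZFREE(&strm, ...) now calls my_free   */
        strm.opaque = Z_NULL;

        if (deflateInit(&strm, Z_DEFAULT_COMPRESSION) != Z_OK) {
            fprintf(stderr, "deflateInit failed\n");
            return 1;
        }
        deflateEnd(&strm);
        return 0;
    }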
Added: trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/infutil.h
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/infutil.h (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/infutil.h Sat Jul 13 19:22:42 2002
@@ -0,0 +1,98 @@
+/* infutil.h -- types and macros common to blocks and codes
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+#ifndef _INFUTIL_H
+#define _INFUTIL_H
+
+typedef enum {
+ TYPE, /* get type bits (3, including end bit) */
+ LENS, /* get lengths for stored */
+ STORED, /* processing stored block */
+ TABLE, /* get table lengths */
+ BTREE, /* get bit lengths tree for a dynamic block */
+ DTREE, /* get length, distance trees for a dynamic block */
+ CODES, /* processing fixed or dynamic block */
+ DRY, /* output remaining window bytes */
+ DONE, /* finished last block, done */
+ BAD} /* got a data error--stuck here */
+inflate_block_mode;
+
+/* inflate blocks semi-private state */
+struct inflate_blocks_state {
+
+ /* mode */
+ inflate_block_mode mode; /* current inflate_block mode */
+
+ /* mode dependent information */
+ union {
+ uInt left; /* if STORED, bytes left to copy */
+ struct {
+ uInt table; /* table lengths (14 bits) */
+ uInt index; /* index into blens (or border) */
+ uIntf *blens; /* bit lengths of codes */
+ uInt bb; /* bit length tree depth */
+ inflate_huft *tb; /* bit length decoding tree */
+ } trees; /* if DTREE, decoding info for trees */
+ struct {
+ inflate_codes_statef
+ *codes;
+ } decode; /* if CODES, current state */
+ } sub; /* submode */
+ uInt last; /* true if this block is the last block */
+
+ /* mode independent information */
+ uInt bitk; /* bits in bit buffer */
+ uLong bitb; /* bit buffer */
+ inflate_huft *hufts; /* single malloc for tree space */
+ Bytef *window; /* sliding window */
+ Bytef *end; /* one byte after sliding window */
+ Bytef *read; /* window read pointer */
+ Bytef *write; /* window write pointer */
+ check_func checkfn; /* check function */
+ uLong check; /* check on output */
+
+};
+
+
+/* defines for inflate input/output */
+/* update pointers and return */
+#define UPDBITS {s->bitb=b;s->bitk=k;}
+#define UPDIN {z->avail_in=n;z->total_in+=p-z->next_in;z->next_in=p;}
+#define UPDOUT {s->write=q;}
+#define UPDATE {UPDBITS UPDIN UPDOUT}
+#define LEAVE {UPDATE return inflate_flush(s,z,r);}
+/* get bytes and bits */
+#define LOADIN {p=z->next_in;n=z->avail_in;b=s->bitb;k=s->bitk;}
+#define NEEDBYTE {if(n)r=Z_OK;else LEAVE}
+#define NEXTBYTE (n--,*p++)
+#define NEEDBITS(j) {while(k<(j)){NEEDBYTE;b|=((uLong)NEXTBYTE)<<k;k+=8;}}
+#define DUMPBITS(j) {b>>=(j);k-=(j);}
+/* output bytes */
+#define WAVAIL (uInt)(q<s->read?s->read-q-1:s->end-q)
+#define LOADOUT {q=s->write;m=(uInt)WAVAIL;}
+#define WRAP {if(q==s->end&&s->read!=s->window){q=s->window;m=(uInt)WAVAIL;}}
+#define FLUSH {UPDOUT r=inflate_flush(s,z,r); LOADOUT}
+#define NEEDOUT {if(m==0){WRAP if(m==0){FLUSH WRAP if(m==0) LEAVE}}r=Z_OK;}
+#define OUTBYTE(a) {*q++=(Byte)(a);m--;}
+/* load local pointers */
+#define LOAD {LOADIN LOADOUT}
+
+/* masks for lower bits (size given to avoid silly warnings with Visual C++) */
+extern uInt inflate_mask[17];
+
+/* copy as much as possible from the sliding window to the output area */
+extern int inflate_flush OF((
+ inflate_blocks_statef *,
+ z_streamp ,
+ int));
+
+struct internal_state {int dummy;}; /* for buggy compilers */
+
+#endif
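The NEEDBITS/DUMPBITS macros above implement the LSB-first bit buffer that
drives the inflate state machine: whole input bytes are OR-ed in above the
bits already held, and consumed bits are shifted out from the bottom.  The
same discipline written as a small standalone function (the names are
invented for this sketch; the real macros operate on the semi-private
inflate_blocks_state and handle end of input via NEEDBYTE/LEAVE):

    /* bit-buffer sketch -- mirrors NEEDBITS/DUMPBITS, no end-of-input
     * handling */
    #include <stdio.h>

    struct bitreader {
        const unsigned char *next;  /* next input byte (like z->next_in)  */
        unsigned avail;             /* bytes remaining (like z->avail_in) */
        unsigned long bitb;         /* bit buffer      (like s->bitb)     */
        unsigned bitk;              /* bits in buffer  (like s->bitk)     */
    };

    /* Return the low `need` bits, refilling the buffer a byte at a time. */
    static unsigned long get_bits(struct bitreader *br, unsigned need)
    {
        unsigned long value;

        while (br->bitk < need) {                 /* NEEDBITS(need)       */
            br->bitb |= (unsigned long)(*br->next++) << br->bitk;
            br->avail--;
            br->bitk += 8;
        }
        value = br->bitb & ((1UL << need) - 1);   /* mask requested bits  */
        br->bitb >>= need;                        /* DUMPBITS(need)       */
        br->bitk -= need;
        return value;
    }

    int main(void)
    {
        /* 0x8b read LSB-first: its low three bits are 011, i.e. 3. */
        static const unsigned char in[] = { 0x8b, 0x1f };
        struct bitreader br = { in, sizeof(in), 0, 0 };

        printf("first 3 bits = %lu\n", get_bits(&br, 3));
        return 0;
    }

The bit buffer and bit count that survive between calls here are exactly what
UPDBITS stores back into the state structure between invocations of the real
inflate routines.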
Added: trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/trees.c
==============================================================================
--- trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/trees.c (original)
+++ trunk/orca/packages/rrdtool-1.0.7.2/zlib-1.1.3/trees.c Sat Jul 13 19:22:42 2002
@@ -0,0 +1,1214 @@
+/* trees.c -- output deflated data using Huffman coding
+ * Copyright (C) 1995-1998 Jean-loup Gailly
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/*
+ * ALGORITHM
+ *
+ * The "deflation" process uses several Huffman trees. The more
+ * common source values are represented by shorter bit sequences.
+ *
+ * Each code tree is stored in a compressed form which is itself
+ * a Huffman encoding of the lengths of all the code strings (in
+ * ascending order by source values). The actual code strings are
+ * reconstructed from the lengths in the inflate process, as described
+ * in the deflate specification.
+ *
+ * REFERENCES
+ *
+ * Deutsch, L.P., "'Deflate' Compressed Data Format Specification".
+ * Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc
+ *
+ * Storer, James A.
+ * Data Compression: Methods and Theory, pp. 49-50.
+ * Computer Science Press, 1988. ISBN 0-7167-8156-5.
+ *
+ * Sedgewick, R.
+ * Algorithms, p290.
+ * Addison-Wesley, 1983. ISBN 0-201-06672-6.
+ */
+
+/* @(#) $Id$ */
+
+/* #define GEN_TREES_H */
+
+#include "deflate.h"
+
+#ifdef DEBUG
+# include <ctype.h>
+#endif
+
+/* ===========================================================================
+ * Constants
+ */
+
+#define MAX_BL_BITS 7
+/* Bit length codes must not exceed MAX_BL_BITS bits */
+
+#define END_BLOCK 256
+/* end of block literal code */
+
+#define REP_3_6 16
+/* repeat previous bit length 3-6 times (2 bits of repeat count) */
+
+#define REPZ_3_10 17
+/* repeat a zero length 3-10 times (3 bits of repeat count) */
+
+#define REPZ_11_138 18
+/* repeat a zero length 11-138 times (7 bits of repeat count) */
+
+local const int extra_lbits[LENGTH_CODES] /* extra bits for each length code */
+ = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0};
+
+local const int extra_dbits[D_CODES] /* extra bits for each distance code */
+ = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};
+
+local const int extra_blbits[BL_CODES]/* extra bits for each bit length code */
+ = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7};
+
+local const uch bl_order[BL_CODES]
+ = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15};
+/* The lengths of the bit length codes are sent in order of decreasing
+ * probability, to avoid transmitting the lengths for unused bit length codes.
+ */
+
+#define Buf_size (8 * 2*sizeof(char))
+/* Number of bits used within bi_buf. (bi_buf might be implemented on
+ * more than 16 bits on some systems.)
+ */
+
+/* ===========================================================================
+ * Local data. These are initialized only once.
+ */
+
+#define DIST_CODE_LEN 512 /* see definition of array dist_code below */
+
+#if defined(GEN_TREES_H) || !defined(STDC)
+/* non-ANSI compilers may not accept trees.h */
+
+local ct_data static_ltree[L_CODES+2];
+/* The static literal tree. Since the bit lengths are imposed, there is no
+ * need for the L_CODES extra codes used during heap construction. However,
+ * the codes 286 and 287 are needed to build a canonical tree (see _tr_init
+ * below).
+ */
+
+local ct_data static_dtree[D_CODES];
+/* The static distance tree. (Actually a trivial tree since all codes use
+ * 5 bits.)
+ */
+
+uch _dist_code[DIST_CODE_LEN];
+/* Distance codes. The first 256 values correspond to the distances
+ * 3 .. 258, the last 256 values correspond to the top 8 bits of
+ * the 15 bit distances.
+ */
+
+uch _length_code[MAX_MATCH-MIN_MATCH+1];
+/* length code for each normalized match length (0 == MIN_MATCH) */
+
+local int base_length[LENGTH_CODES];
+/* First normalized length for each code (0 = MIN_MATCH) */
+
+local int base_dist[D_CODES];
+/* First normalized distance for each code (0 = distance of 1) */
+
+#else
+# include "trees.h"
+#endif /* GEN_TREES_H */
+
+struct static_tree_desc_s {
+ const ct_data *static_tree; /* static tree or NULL */
+ const intf *extra_bits; /* extra bits for each code or NULL */
+ int extra_base; /* base index for extra_bits */
+ int elems; /* max number of elements in the tree */
+ int max_length; /* max bit length for the codes */
+};
+
+local static_tree_desc static_l_desc =
+{static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS};
+
+local static_tree_desc static_d_desc =
+{static_dtree, extra_dbits, 0, D_CODES, MAX_BITS};
+
+local static_tree_desc static_bl_desc =
+{(const ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS};
+
+/* ===========================================================================
+ * Local (static) routines in this file.
+ */
+
+local void tr_static_init OF((void));
+local void init_block OF((deflate_state *s));
+local void pqdownheap OF((deflate_state *s, ct_data *tree, int k));
+local void gen_bitlen OF((deflate_state *s, tree_desc *desc));
+local void gen_codes OF((ct_data *tree, int max_code, ushf *bl_count));
+local void build_tree OF((deflate_state *s, tree_desc *desc));
+local void scan_tree OF((deflate_state *s, ct_data *tree, int max_code));
+local void send_tree OF((deflate_state *s, ct_data *tree, int max_code));
+local int build_bl_tree OF((deflate_state *s));
+local void send_all_trees OF((deflate_state *s, int lcodes, int dcodes,
+ int blcodes));
+local void compress_block OF((deflate_state *s, ct_data *ltree,
+ ct_data *dtree));
+local void set_data_type OF((deflate_state *s));
+local unsigned bi_reverse OF((unsigned value, int length));
+local void bi_windup OF((deflate_state *s));
+local void bi_flush OF((deflate_state *s));
+local void copy_block OF((deflate_state *s, charf *buf, unsigned len,
+ int header));
+
+#ifdef GEN_TREES_H
+local void gen_trees_header OF((void));
+#endif
+
+#ifndef DEBUG
+# define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len)
+ /* Send a code of the given tree. c and tree must not have side effects */
+
+#else /* DEBUG */
+# define send_code(s, c, tree) \
+ { if (z_verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \
+ send_bits(s, tree[c].Code, tree[c].Len); }
+#endif
+
+/* ===========================================================================
+ * Output a short LSB first on the stream.
+ * IN assertion: there is enough room in pendingBuf.
+ */
+#define put_short(s, w) { \
+ put_byte(s, (uch)((w) & 0xff)); \
+ put_byte(s, (uch)((ush)(w) >> 8)); \
+}
+
+/* ===========================================================================
+ * Send a value on a given number of bits.
+ * IN assertion: length <= 16 and value fits in length bits.
+ */
+#ifdef DEBUG
+local void send_bits OF((deflate_state *s, int value, int length));
+
+local void send_bits(s, value, length)
+ deflate_state *s;
+ int value; /* value to send */
+ int length; /* number of bits */
+{
+ Tracevv((stderr," l %2d v %4x ", length, value));
+ Assert(length > 0 && length <= 15, "invalid length");
+ s->bits_sent += (ulg)length;
+
+ /* If not enough room in bi_buf, use (valid) bits from bi_buf and
+ * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid))
+ * unused bits in value.
+ */
+ if (s->bi_valid > (int)Buf_size - length) {
+ s->bi_buf |= (value << s->bi_valid);
+ put_short(s, s->bi_buf);
+ s->bi_buf = (ush)value >> (Buf_size - s->bi_valid);
+ s->bi_valid += length - Buf_size;
+ } else {
+ s->bi_buf |= value << s->bi_valid;
+ s->bi_valid += length;
+ }
+}
+#else /* !DEBUG */
+
+#define send_bits(s, value, length) \
+{ int len = length;\
+ if (s->bi_valid > (int)Buf_size - len) {\
+ int val = value;\
+ s->bi_buf |= (val << s->bi_valid);\
+ put_short(s, s->bi_buf);\
+ s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\
+ s->bi_valid += len - Buf_size;\
+ } else {\
+ s->bi_buf |= (value) << s->bi_valid;\
+ s->bi_valid += len;\
+ }\
+}
+#endif /* DEBUG */
+
+
+#define MAX(a,b) (a >= b ? a : b)
+/* the arguments must not have side effects */
+
+/* ===========================================================================
+ * Initialize the various 'constant' tables.
+ */
+local void tr_static_init()
+{
+#if defined(GEN_TREES_H) || !defined(STDC)
+ static int static_init_done = 0;
+ int n; /* iterates over tree elements */
+ int bits; /* bit counter */
+ int length; /* length value */
+ int code; /* code value */
+ int dist; /* distance index */
+ ush bl_count[MAX_BITS+1];
+ /* number of codes at each bit length for an optimal tree */
+
+ if (static_init_done) return;
+
+ /* For some embedded targets, global variables are not initialized: */
+ static_l_desc.static_tree = static_ltree;
+ static_l_desc.extra_bits = extra_lbits;
+ static_d_desc.static_tree = static_dtree;
+ static_d_desc.extra_bits = extra_dbits;
+ static_bl_desc.extra_bits = extra_blbits;
+
+ /* Initialize the mapping length (0..255) -> length code (0..28) */
+ length = 0;
+ for (code = 0; code < LENGTH_CODES-1; code++) {
+ base_length[code] = length;
+ for (n = 0; n < (1<<extra_lbits[code]); n++) {
+ _length_code[length++] = (uch)code;
+ }
+ }
+ Assert (length == 256, "tr_static_init: length != 256");
+ /* Note that the length 255 (match length 258) can be represented
+ * in two different ways: code 284 + 5 bits or code 285, so we
+ * overwrite length_code[255] to use the best encoding:
+ */
+ _length_code[length-1] = (uch)code;
+
+ /* Initialize the mapping dist (0..32K) -> dist code (0..29) */
+ dist = 0;
+ for (code = 0 ; code < 16; code++) {
+ base_dist[code] = dist;
+ for (n = 0; n < (1<<extra_dbits[code]); n++) {
+ _dist_code[dist++] = (uch)code;
+ }
+ }
+ Assert (dist == 256, "tr_static_init: dist != 256");
+ dist >>= 7; /* from now on, all distances are divided by 128 */
+ for ( ; code < D_CODES; code++) {
+ base_dist[code] = dist << 7;
+ for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) {
+ _dist_code[256 + dist++] = (uch)code;
+ }
+ }
+ Assert (dist == 256, "tr_static_init: 256+dist != 512");
+
+ /* Construct the codes of the static literal tree */
+ for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0;
+ n = 0;
+ while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++;
+ while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++;
+ while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++;
+ while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++;
+ /* Codes 286 and 287 do not exist, but we must include them in the
+ * tree construction to get a canonical Huffman tree (longest code
+ * all ones)
+ */
+ gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count);
+
+ /* The static distance tree is trivial: */
+ for (n = 0; n < D_CODES; n++) {
+ static_dtree[n].Len = 5;
+ static_dtree[n].Code = bi_reverse((unsigned)n, 5);
+ }
+ static_init_done = 1;
+
+# ifdef GEN_TREES_H
+ gen_trees_header();
+# endif
+#endif /* defined(GEN_TREES_H) || !defined(STDC) */
+}
+
+/* ===========================================================================
+ * Generate the file trees.h describing the static trees.
+ */
+#ifdef GEN_TREES_H
+# ifndef DEBUG
+# include <stdio.h>
+# endif
+
+# define SEPARATOR(i, last, width) \
+ ((i) == (last)? "\n};\n\n" : \
+ ((i) % (width) == (width)-1 ? ",\n" : ", "))
+
+void gen_trees_header()
+{
+ FILE *header = fopen("trees.h", "w");
+ int i;
+
+ Assert (header != NULL, "Can't open trees.h");
+ fprintf(header,
+ "/* header created automatically with -DGEN_TREES_H */\n\n");
+
+ fprintf(header, "local const ct_data static_ltree[L_CODES+2] = {\n");
+ for (i = 0; i < L_CODES+2; i++) {
+ fprintf(header, "{{%3u},{%3u}}%s", static_ltree[i].Code,
+ static_ltree[i].Len, SEPARATOR(i, L_CODES+1, 5));
+ }
+
+ fprintf(header, "local const ct_data static_dtree[D_CODES] = {\n");
+ for (i = 0; i < D_CODES; i++) {
+ fprintf(header, "{{%2u},{%2u}}%s", static_dtree[i].Code,
+ static_dtree[i].Len, SEPARATOR(i, D_CODES-1, 5));
+ }
+
+ fprintf(header, "const uch _dist_code[DIST_CODE_LEN] = {\n");
+ for (i = 0; i < DIST_CODE_LEN; i++) {
+ fprintf(header, "%2u%s", _dist_code[i],
+ SEPARATOR(i, DIST_CODE_LEN-1, 20));
+ }
+
+ fprintf(header, "const uch _length_code[MAX_MATCH-MIN_MATCH+1]= {\n");
+ for (i = 0; i < MAX_MATCH-MIN_MATCH+1; i++) {
+ fprintf(header, "%2u%s", _length_code[i],
+ SEPARATOR(i, MAX_MATCH-MIN_MATCH, 20));
+ }
+
+ fprintf(header, "local const int base_length[LENGTH_CODES] = {\n");
+ for (i = 0; i < LENGTH_CODES; i++) {
+ fprintf(header, "%1u%s", base_length[i],
+ SEPARATOR(i, LENGTH_CODES-1, 20));
+ }
+
+ fprintf(header, "local const int base_dist[D_CODES] = {\n");
+ for (i = 0; i < D_CODES; i++) {
+ fprintf(header, "%5u%s", base_dist[i],
+ SEPARATOR(i, D_CODES-1, 10));
+ }
+
+ fclose(header);
+}
+#endif /* GEN_TREES_H */
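/*
 * Note on GEN_TREES_H (illustrative, not part of the zlib sources): the
 * generator above only runs when the library is compiled with
 * -DGEN_TREES_H, in which case tr_static_init() ends by calling
 * gen_trees_header() and writing a fresh trees.h.  Any code path that
 * initializes a deflate stream reaches tr_static_init(), so a minimal
 * driver could look like this (a hypothetical helper, not in this commit):
 *
 *     #include "zlib.h"
 *     int main(void)
 *     {
 *         z_stream s;
 *         s.zalloc = Z_NULL;
 *         s.zfree  = Z_NULL;
 *         s.opaque = Z_NULL;
 *         deflateInit(&s, Z_DEFAULT_COMPRESSION);  // runs tr_static_init()
 *         deflateEnd(&s);
 *         return 0;
 *     }
 *
 * Normal ANSI builds never take this path: they use the shipped trees.h
 * instead, as the #else branch near the top of this file shows.
 */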
+
+/* ===========================================================================
+ * Initialize the tree data structures for a new zlib stream.
+ */
+void _tr_init(s)
+ deflate_state *s;
+{
+ tr_static_init();
+
+ s->l_desc.dyn_tree = s->dyn_ltree;
+ s->l_desc.stat_desc = &static_l_desc;
+
+ s->d_desc.dyn_tree = s->dyn_dtree;
+ s->d_desc.stat_desc = &static_d_desc;
+
+ s->bl_desc.dyn_tree = s->bl_tree;
+ s->bl_desc.stat_desc = &static_bl_desc;
+
+ s->bi_buf = 0;
+ s->bi_valid = 0;
+ s->last_eob_len = 8; /* enough lookahead for inflate */
+#ifdef DEBUG
+ s->compressed_len = 0L;
+ s->bits_sent = 0L;
+#endif
+
+ /* Initialize the first block of the first file: */
+ init_block(s);
+}
+
+/* ===========================================================================
+ * Initialize a new block.
+ */
+local void init_block(s)
+ deflate_state *s;
+{
+ int n; /* iterates over tree elements */
+
+ /* Initialize the trees. */
+ for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0;
+ for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0;
+ for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0;
+
+ s->dyn_ltree[END_BLOCK].Freq = 1;
+ s->opt_len = s->static_len = 0L;
+ s->last_lit = s->matches = 0;
+}
+
+#define SMALLEST 1
+/* Index within the heap array of least frequent node in the Huffman tree */
+
+
+/* ===========================================================================
+ * Remove the smallest element from the heap and recreate the heap with
+ * one less element. Updates heap and heap_len.
+ */
+#define pqremove(s, tree, top) \
+{\
+ top = s->heap[SMALLEST]; \
+ s->heap[SMALLEST] = s->heap[s->heap_len--]; \
+ pqdownheap(s, tree, SMALLEST); \
+}
+
+/* ===========================================================================
+ * Compares two subtrees, using the tree depth as tie breaker when
+ * the subtrees have equal frequency. This minimizes the worst case length.
+ */
+#define smaller(tree, n, m, depth) \
+ (tree[n].Freq < tree[m].Freq || \
+ (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m]))
+
+/* ===========================================================================
+ * Restore the heap property by moving down the tree starting at node k,
+ * exchanging a node with the smallest of its two sons if necessary, stopping
+ * when the heap property is re-established (each father smaller than its
+ * two sons).
+ */
+local void pqdownheap(s, tree, k)
+ deflate_state *s;
+ ct_data *tree; /* the tree to restore */
+ int k; /* node to move down */
+{
+ int v = s->heap[k];
+ int j = k << 1; /* left son of k */
+ while (j <= s->heap_len) {
+ /* Set j to the smallest of the two sons: */
+ if (j < s->heap_len &&
+ smaller(tree, s->heap[j+1], s->heap[j], s->depth)) {
+ j++;
+ }
+ /* Exit if v is smaller than both sons */
+ if (smaller(tree, v, s->heap[j], s->depth)) break;
+
+ /* Exchange v with the smallest son */
+ s->heap[k] = s->heap[j]; k = j;
+
+ /* And continue down the tree, setting j to the left son of k */
+ j <<= 1;
+ }
+ s->heap[k] = v;
+}
+
+/* ===========================================================================
+ * Compute the optimal bit lengths for a tree and update the total bit length
+ * for the current block.
+ * IN assertion: the fields freq and dad are set, heap[heap_max] and
+ * above are the tree nodes sorted by increasing frequency.
+ * OUT assertions: the field len is set to the optimal bit length, the
+ * array bl_count contains the frequencies for each bit length.
+ * The length opt_len is updated; static_len is also updated if stree is
+ * not null.
+ */
+local void gen_bitlen(s, desc)
+ deflate_state *s;
+ tree_desc *desc; /* the tree descriptor */
+{
+ ct_data *tree = desc->dyn_tree;
+ int max_code = desc->max_code;
+ const ct_data *stree = desc->stat_desc->static_tree;
+ const intf *extra = desc->stat_desc->extra_bits;
+ int base = desc->stat_desc->extra_base;
+ int max_length = desc->stat_desc->max_length;
+ int h; /* heap index */
+ int n, m; /* iterate over the tree elements */
+ int bits; /* bit length */
+ int xbits; /* extra bits */
+ ush f; /* frequency */
+ int overflow = 0; /* number of elements with bit length too large */
+
+ for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0;
+
+ /* In a first pass, compute the optimal bit lengths (which may
+ * overflow in the case of the bit length tree).
+ */
+ tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */
+
+ for (h = s->heap_max+1; h < HEAP_SIZE; h++) {
+ n = s->heap[h];
+ bits = tree[tree[n].Dad].Len + 1;
+ if (bits > max_length) bits = max_length, overflow++;
+ tree[n].Len = (ush)bits;
+ /* We overwrite tree[n].Dad which is no longer needed */
+
+ if (n > max_code) continue; /* not a leaf node */
+
+ s->bl_count[bits]++;
+ xbits = 0;
+ if (n >= base) xbits = extra[n-base];
+ f = tree[n].Freq;
+ s->opt_len += (ulg)f * (bits + xbits);
+ if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits);
+ }
+ if (overflow == 0) return;
+
+ Trace((stderr,"\nbit length overflow\n"));
+ /* This happens for example on obj2 and pic of the Calgary corpus */
+
+ /* Find the first bit length which could increase: */
+ do {
+ bits = max_length-1;
+ while (s->bl_count[bits] == 0) bits--;
+ s->bl_count[bits]--; /* move one leaf down the tree */
+ s->bl_count[bits+1] += 2; /* move one overflow item as its brother */
+ s->bl_count[max_length]--;
+ /* The brother of the overflow item also moves one step up,
+ * but this does not affect bl_count[max_length]
+ */
+ overflow -= 2;
+ } while (overflow > 0);
+
+ /* Now recompute all bit lengths, scanning in increasing frequency.
+ * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all
+ * lengths instead of fixing only the wrong ones. This idea is taken
+ * from 'ar' written by Haruhiko Okumura.)
+ */
+ for (bits = max_length; bits != 0; bits--) {
+ n = s->bl_count[bits];
+ while (n != 0) {
+ m = s->heap[--h];
+ if (m > max_code) continue;
+ if (tree[m].Len != (unsigned) bits) {
+ Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits));
+ s->opt_len += ((long)bits - (long)tree[m].Len)
+ *(long)tree[m].Freq;
+ tree[m].Len = (ush)bits;
+ }
+ n--;
+ }
+ }
+}
+
+/* ===========================================================================
+ * Generate the codes for a given tree and bit counts (which need not be
+ * optimal).
+ * IN assertion: the array bl_count contains the bit length statistics for
+ * the given tree and the field len is set for all tree elements.
+ * OUT assertion: the field code is set for all tree elements of non
+ * zero code length.
+ */
+local void gen_codes (tree, max_code, bl_count)
+ ct_data *tree; /* the tree to decorate */
+ int max_code; /* largest code with non zero frequency */
+ ushf *bl_count; /* number of codes at each bit length */
+{
+ ush next_code[MAX_BITS+1]; /* next code value for each bit le