[Orca-checkins] rev 197 - in trunk/orca: lib lib/SE lib/SE/3.2.1 lib/SE/3.3 data_gatherers/orcallator
sean at seanoneill.info
sean at seanoneill.info
Sun Jan 19 20:24:27 PST 2003
Author: sean at seanoneill.info
Date: 2003-01-19 20:24:17 -0800 (Sun, 19 Jan 2003)
New Revision: 197
Added:
trunk/orca/lib/SE/
trunk/orca/lib/SE/3.2.1/
trunk/orca/lib/SE/3.2.1/orca_p_vmstat_class.se
trunk/orca/lib/SE/3.3/
trunk/orca/lib/SE/3.3/orca_p_vmstat_class.se
Modified:
trunk/orca/data_gatherers/orcallator/orcallator.cfg.in
trunk/orca/data_gatherers/orcallator/orcallator.se
trunk/orca/data_gatherers/orcallator/start_orcallator.sh.in
trunk/orca/lib/Makefile.in
Log:
* Added new lib/SE/3.2.1 and lib/SE/3.3 directories to hold new
orca_p_vmstat_class.se file.
* orca_p_vmstat_class.se file added which contains definitions of new metrics
* start_orcallator modified to determine installed SE version which is used to
include the correct version of orca_p_vmstat_class.se file.
* orcallator.cfg modified to graph new metrics
* orcallator.se modified to collect new metrics
* Makefile.in modified to properly create new lib/SE/3.2.1 and lib/SE/3.3
directories and copy orca_p_vmstat_class.se files during installation
* New metrics include:
pgrec/s - page reclaims (include pageout)
pgfrec/s - page reclaims from free list
pgin/s - pageins
pages_in/s - pages paged in
pgout/s - pageouts
pages_out/s - pages paged out
dfree/s - pages freed by daemon or auto
min_fault/s - minor page faults
(pvm.hat_fault + pvm.as_fault)
maj_fault/s - major page faults
prot_fault/s - protection faults
cow_fault/s - copy-on-write faults
zfod/s - zero-fill-on-demand faults
interrupts/s - interrupts including clock
intrthread/s - interrupts as threads
(below clock)
system_calls/s
context_switches/s
invol_switches/s - involuntary context switches
traps/s
forks/s
vforks/s
execs/s
namei/s - pathname lookups
ufsiget/s - ufs_iget() calls
ufsdirblk/s - directory blocks read
ufsinopage/s - inodes taken with no attached
pages
Modified: trunk/orca/lib/Makefile.in
==============================================================================
--- trunk/orca/lib/Makefile.in (original)
+++ trunk/orca/lib/Makefile.in 2003-01-19 20:24:26.000000000 -0800
@@ -40,6 +40,17 @@
done; \
fi
+ $(MKDIR) $(libdir)/SE/3.2.1
+ @for f in SE/3.2.1/*; do \
+ $(INSTALL) -m 0644 $$f $(libdir)/SE/3.2.1; \
+ echo $(INSTALL) -m 0644 $$f $(libdir)/SE/3.2.1; \
+ done
+ $(MKDIR) $(libdir)/SE/3.3
+ @for f in SE/3.3/*; do \
+ $(INSTALL) -m 0644 $$f $(libdir)/SE/3.3; \
+ echo $(INSTALL) -m 0644 $$f $(libdir)/SE/3.3; \
+ done
+
clean:
distclean: clean
Added: trunk/orca/lib/SE/3.2.1/orca_p_vmstat_class.se
==============================================================================
--- trunk/orca/lib/SE/3.2.1/orca_p_vmstat_class.se (original)
+++ trunk/orca/lib/SE/3.2.1/orca_p_vmstat_class.se 2003-01-19 20:24:26.000000000 -0800
@@ -0,0 +1,386 @@
+//
+// Copyright (c) 1993-2001 by Richard Pettit. All rights reserved.
+//
+// Some of this work was derived from include files containing the following
+// copyrights.
+//
+// Copyright (c) 1986-1994 by Sun Microsystems, Inc.
+// Copyright (c) 1983-1989 by AT&T
+// Copyright (c) 1980-1993 by The Regents of the University of California.
+//
+// The work as a whole represents unique intellectual property and is
+// copyright by Richard Pettit as shown on the first line.
+//
+
+#ifndef _P_VMSTAT_CLASS_SE_
+#define _P_VMSTAT_CLASS_SE_
+
+#include <unistd.se>
+#include <kstat.se>
+#include <sysdepend.se>
+
+/* for memory computation */
+#define PGTOK(n) (((n) * pagesize) / 1024)
+#define DELTA(name) (new_vminfo.name - old_vminfo.name)
+#define COMPUTE(name) PGTOK(DELTA(name) / updates)
+
+/* make the code easier on the eye */
+#define CSI_DIFF(name) \
+ pvmGLOB_cpu_sysinfo[i].name = \
+ (csi.name - pvmGLOB_old_cpu_sysinfo[i].name)
+#define CSI_PCT(name) \
+ pvmGLOB_cpu_sysinfo[i].name = (pvmGLOB_cpu_sysinfo[i].name / total) * 100.0
+
+#define CSIPCT(name) (100.0 * name / (hz * pvmGLOB_etime[i]))
+
+#define CVI_DIFF(name) \
+ pvmGLOB_cpu_vminfo[i].name = \
+ (cvi.name - pvmGLOB_old_cpu_vminfo[i].name)
+
+double pvmGLOB_etime[];
+ulong pvmGLOB_old_time[];
+ks_cpu_sysinfo pvmGLOB_cpu_sysinfo[];
+ks_cpu_sysinfo pvmGLOB_old_cpu_sysinfo[];
+ks_cpu_vminfo pvmGLOB_cpu_vminfo[];
+ks_cpu_vminfo pvmGLOB_old_cpu_vminfo[];
+
+int pvmGLOB_cpu_size;
+
+pvmGLOB_realloc()
+{
+ pvmGLOB_etime = renew pvmGLOB_etime [pvmGLOB_cpu_size];
+ pvmGLOB_old_time = renew pvmGLOB_old_time [pvmGLOB_cpu_size];
+ pvmGLOB_cpu_sysinfo = renew pvmGLOB_cpu_sysinfo [pvmGLOB_cpu_size];
+ pvmGLOB_old_cpu_sysinfo = renew pvmGLOB_old_cpu_sysinfo [pvmGLOB_cpu_size];
+ pvmGLOB_cpu_vminfo = renew pvmGLOB_cpu_vminfo [pvmGLOB_cpu_size];
+ pvmGLOB_old_cpu_vminfo = renew pvmGLOB_old_cpu_vminfo [pvmGLOB_cpu_size];
+}
+
+class p_vmstat {
+
+ /* which cpu */
+ int number$;
+ int instance; /* instance number from the kernel */
+ int ncpus; /* current total number of CPUs */
+
+ /* these values are numbers of procs */
+ double runque;
+ double waiting;
+ double swpque;
+
+ /* these values are in Kbytes */
+ int swap_avail;
+ int freemem;
+
+ double pgrec; /* page reclaims (include pageout) */
+ double pgfrec; /* page reclaims from free list */
+ double pgin; /* pageins */
+ double pgout; /* pageouts */
+ double pgswapin; /* pages swapped in */
+ double pgswapout; /* pages swapped out */
+ double dfree; /* pages freed by daemon or auto */
+
+ double hat_fault; /* minor page faults via hat_fault() */
+ double as_fault; /* minor page faults via as_fault() */
+ double maj_fault; /* major page faults */
+ double prot_fault; /* protection faults */
+ double cow_fault; /* copy-on-write faults */
+ double zfod; /* pages zero filled on demand */
+
+ double sysfork; /* forks */
+ double sysvfork; /* vforks */
+ double sysexec; /* execs */
+
+ double namei; /* pathname lookups */
+  double ufsiget;             /* ufs_iget() calls */
+ double ufsdirblk; /* directory block read */
+
+ double ufsinopage; /* inodes taken with no attached pgs */
+
+ /* these values are per second */
+ double pages_in; /* pages paged in */
+ double pages_out; /* pages paged out */
+ double swapins; /* swap-in occurrences */
+ double swapouts; /* swap-out occurrences */
+ double scan; /* pages scanned */
+ double pgrrun; /* how many times did pageout run */
+
+ double smtx; /* sleeps on a mutex per sec - mutex adaptive enter */
+ double interrupts; /* interrupts including clock */
+ double intrthread; /* interrupts as threads (below clock) */
+ double system_calls;
+ double context_switches;
+ double invol_switches;
+ double trap;
+
+ /* these are percentages of total over the last period */
+ double user_time;
+ double system_time;
+ double wait_time;
+ double idle_time;
+
+ p_vmstat$()
+ {
+ int i;
+ int n;
+ double updates;
+ int initialized = 0;
+ int last_number;
+ double hz = sysconf(_SC_CLK_TCK);
+ long pagesize = sysconf(_SC_PAGESIZE);
+ double total;
+ ulong itime;
+ ulong new_time;
+ ks_cpu_sysinfo csi;
+ ks_cpu_vminfo cvi;
+ ks_system_misc kstat$misc;
+ ks_sysinfo kstat$info;
+ ks_sysinfo old_sysinfo;
+ ks_sysinfo new_sysinfo;
+ ks_vminfo kstat$vminfo;
+ ks_vminfo old_vminfo;
+ ks_vminfo new_vminfo;
+
+ // allow this to change each time
+ ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+
+ /* grab initial values */
+ if (initialized == 0) {
+ pvmGLOB_cpu_size = ncpus;
+
+ pvmGLOB_etime = new double[pvmGLOB_cpu_size];
+ pvmGLOB_old_time = new ulong[pvmGLOB_cpu_size];
+ pvmGLOB_cpu_sysinfo = new ks_cpu_sysinfo[pvmGLOB_cpu_size];
+ pvmGLOB_old_cpu_sysinfo = new ks_cpu_sysinfo[pvmGLOB_cpu_size];
+ pvmGLOB_cpu_vminfo = new ks_cpu_vminfo[pvmGLOB_cpu_size];
+ pvmGLOB_old_cpu_vminfo = new ks_cpu_vminfo[pvmGLOB_cpu_size];
+
+ n = kstat$misc.clk_intr;
+ for(i=0; i<ncpus; i++) {
+ csi.number$ = i;
+ refresh$(csi);
+ pvmGLOB_old_cpu_sysinfo[i] = csi;
+ cvi.number$ = i;
+ refresh$(cvi);
+ pvmGLOB_old_cpu_vminfo[i] = cvi;
+ pvmGLOB_old_time[i] = n;
+ pvmGLOB_etime[i] = n / hz;
+ }
+
+ /* memory values */
+ old_vminfo = kstat$vminfo;
+ old_sysinfo = kstat$info;
+
+ initialized = 1;
+ last_number = number$;
+
+ return;
+ }
+
+ // keep up with ncpus
+ if (ncpus > pvmGLOB_cpu_size) {
+ pvmGLOB_cpu_size = ncpus;
+ pvmGLOB_realloc();
+ }
+
+ /* select which cpu */
+ i = number$;
+ if (i < 0 || i >= ncpus) {
+ number$ = -1;
+ return;
+ }
+ instance = pvmGLOB_old_cpu_sysinfo[i].instance;
+
+ /* how much time has gone by */
+ new_time = kstat$misc.clk_intr;
+ itime = new_time - pvmGLOB_old_time[i];
+
+ /* no time has gone by, return */
+ if (itime == 0) {
+ if (i != last_number) {
+ smtx = pvmGLOB_cpu_sysinfo[i].mutex_adenters/pvmGLOB_etime[i];
+ interrupts = pvmGLOB_cpu_sysinfo[i].intr/pvmGLOB_etime[i];
+ intrthread = pvmGLOB_cpu_sysinfo[i].intrthread/pvmGLOB_etime[i];
+ system_calls = pvmGLOB_cpu_sysinfo[i].syscall/pvmGLOB_etime[i];
+ trap = pvmGLOB_cpu_sysinfo[i].trap/pvmGLOB_etime[i];
+ context_switches = pvmGLOB_cpu_sysinfo[i].pswitch/pvmGLOB_etime[i];
+ invol_switches = pvmGLOB_cpu_sysinfo[i].inv_swtch/pvmGLOB_etime[i];
+ user_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_USER]);
+ system_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_KERNEL]);
+ wait_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_WAIT]);
+ idle_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_IDLE]);
+
+ sysfork = pvmGLOB_cpu_sysinfo[i].sysfork/pvmGLOB_etime[i];
+ sysvfork = pvmGLOB_cpu_sysinfo[i].sysvfork/pvmGLOB_etime[i];
+ sysexec = pvmGLOB_cpu_sysinfo[i].sysexec/pvmGLOB_etime[i];
+
+ namei = pvmGLOB_cpu_sysinfo[i].namei/pvmGLOB_etime[i];
+ ufsiget = pvmGLOB_cpu_sysinfo[i].ufsiget/pvmGLOB_etime[i];
+ ufsdirblk = pvmGLOB_cpu_sysinfo[i].ufsdirblk/pvmGLOB_etime[i];
+
+ ufsinopage = pvmGLOB_cpu_sysinfo[i].ufsinopage/pvmGLOB_etime[i];
+
+ pgrec = pvmGLOB_cpu_vminfo[i].pgrec/pvmGLOB_etime[i];
+ pgfrec = pvmGLOB_cpu_vminfo[i].pgfrec/pvmGLOB_etime[i];
+ pgin = pvmGLOB_cpu_vminfo[i].pgin/pvmGLOB_etime[i];
+ pgout = pvmGLOB_cpu_vminfo[i].pgout/pvmGLOB_etime[i];
+ dfree = pvmGLOB_cpu_vminfo[i].dfree/pvmGLOB_etime[i];
+
+ hat_fault = pvmGLOB_cpu_vminfo[i].hat_fault/pvmGLOB_etime[i];
+ as_fault = pvmGLOB_cpu_vminfo[i].as_fault/pvmGLOB_etime[i];
+ maj_fault = pvmGLOB_cpu_vminfo[i].maj_fault/pvmGLOB_etime[i];
+ prot_fault = pvmGLOB_cpu_vminfo[i].prot_fault/pvmGLOB_etime[i];
+ cow_fault = pvmGLOB_cpu_vminfo[i].cow_fault/pvmGLOB_etime[i];
+ zfod = pvmGLOB_cpu_vminfo[i].zfod/pvmGLOB_etime[i];
+
+ pages_in = pvmGLOB_cpu_vminfo[i].pgpgin/pvmGLOB_etime[i];
+ pages_out = pvmGLOB_cpu_vminfo[i].pgpgout/pvmGLOB_etime[i];
+ swapins = pvmGLOB_cpu_vminfo[i].swapin/pvmGLOB_etime[i];
+ swapouts = pvmGLOB_cpu_vminfo[i].swapout/pvmGLOB_etime[i];
+ scan = pvmGLOB_cpu_vminfo[i].scan/pvmGLOB_etime[i];
+ pgrrun = pvmGLOB_cpu_vminfo[i].pgrrun/pvmGLOB_etime[i];
+
+ last_number = i;
+ }
+ return;
+ }
+ pvmGLOB_etime[i] = itime / hz;
+ pvmGLOB_old_time[i] = new_time;
+
+ csi.number$ = i;
+ refresh$(csi);
+
+ cvi.number$ = i;
+ refresh$(cvi);
+
+ new_sysinfo = kstat$info;
+ updates = new_sysinfo.updates - old_sysinfo.updates;
+ if (updates > 0.0) {
+ new_vminfo = kstat$vminfo;
+ }
+
+ /* compute cpu sysinfo diffs */
+ CSI_DIFF(mutex_adenters);
+ CSI_DIFF(intr);
+ CSI_DIFF(intrthread);
+ CSI_DIFF(syscall);
+ CSI_DIFF(pswitch);
+ CSI_DIFF(inv_swtch);
+ CSI_DIFF(trap);
+ CSI_DIFF(cpu[CPU_USER]);
+ CSI_DIFF(cpu[CPU_KERNEL]);
+ CSI_DIFF(cpu[CPU_WAIT]);
+ CSI_DIFF(cpu[CPU_IDLE]);
+ CSI_DIFF(wait[W_IO]);
+ CSI_DIFF(wait[W_SWAP]);
+ CSI_DIFF(wait[W_PIO]);
+
+ CSI_DIFF(sysfork);
+ CSI_DIFF(sysvfork);
+ CSI_DIFF(sysexec);
+
+ CSI_DIFF(namei);
+ CSI_DIFF(ufsiget);
+ CSI_DIFF(ufsdirblk);
+
+ CSI_DIFF(ufsinopage);
+
+ /* compute percentages
+ total = pvmGLOB_cpu_sysinfo[i].cpu[CPU_USER] +
+ pvmGLOB_cpu_sysinfo[i].cpu[CPU_KERNEL] +
+ pvmGLOB_cpu_sysinfo[i].cpu[CPU_WAIT] +
+ pvmGLOB_cpu_sysinfo[i].cpu[CPU_IDLE];
+ CSI_PCT(cpu[CPU_USER]);
+ CSI_PCT(cpu[CPU_KERNEL]);
+ CSI_PCT(cpu[CPU_WAIT]);
+ CSI_PCT(cpu[CPU_IDLE]);
+ */
+
+ /* save new values */
+ pvmGLOB_old_cpu_sysinfo[i] = csi;
+
+ CVI_DIFF(pgrec);
+ CVI_DIFF(pgfrec);
+ CVI_DIFF(pgin);
+ CVI_DIFF(pgout);
+ CVI_DIFF(dfree);
+
+ CVI_DIFF(hat_fault);
+ CVI_DIFF(as_fault);
+ CVI_DIFF(maj_fault);
+ CVI_DIFF(prot_fault);
+ CVI_DIFF(zfod);
+ CVI_DIFF(cow_fault);
+
+ /* compute page/swap values */
+ pvmGLOB_cpu_vminfo[i].pgpgin =
+ ((cvi.pgpgin - pvmGLOB_old_cpu_vminfo[i].pgpgin) * pagesize) / 1024;
+ pvmGLOB_cpu_vminfo[i].pgpgout =
+ ((cvi.pgpgout - pvmGLOB_old_cpu_vminfo[i].pgpgout) * pagesize) / 1024;
+ CVI_DIFF(swapin);
+ CVI_DIFF(swapout);
+ CVI_DIFF(scan);
+ CVI_DIFF(pgrrun);
+
+ /* save new values */
+ pvmGLOB_old_cpu_vminfo[i] = cvi;
+
+ /* update and return */
+ smtx = pvmGLOB_cpu_sysinfo[i].mutex_adenters/pvmGLOB_etime[i];
+ interrupts = pvmGLOB_cpu_sysinfo[i].intr/pvmGLOB_etime[i];
+ intrthread = pvmGLOB_cpu_sysinfo[i].intrthread/pvmGLOB_etime[i];
+ system_calls = pvmGLOB_cpu_sysinfo[i].syscall/pvmGLOB_etime[i];
+ context_switches = pvmGLOB_cpu_sysinfo[i].pswitch/pvmGLOB_etime[i];
+ invol_switches = pvmGLOB_cpu_sysinfo[i].inv_swtch/pvmGLOB_etime[i];
+ trap = pvmGLOB_cpu_sysinfo[i].trap/pvmGLOB_etime[i];
+ user_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_USER]);
+ system_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_KERNEL]);
+ wait_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_WAIT]);
+ idle_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_IDLE]);
+
+ sysfork = pvmGLOB_cpu_sysinfo[i].sysfork/pvmGLOB_etime[i];
+ sysvfork = pvmGLOB_cpu_sysinfo[i].sysvfork/pvmGLOB_etime[i];
+ sysexec = pvmGLOB_cpu_sysinfo[i].sysexec/pvmGLOB_etime[i];
+
+ namei = pvmGLOB_cpu_sysinfo[i].namei/pvmGLOB_etime[i];
+ ufsiget = pvmGLOB_cpu_sysinfo[i].ufsiget/pvmGLOB_etime[i];
+ ufsdirblk = pvmGLOB_cpu_sysinfo[i].ufsdirblk/pvmGLOB_etime[i];
+
+ ufsinopage = pvmGLOB_cpu_sysinfo[i].ufsinopage/pvmGLOB_etime[i];
+
+ pgrec = pvmGLOB_cpu_vminfo[i].pgrec/pvmGLOB_etime[i];
+ pgfrec = pvmGLOB_cpu_vminfo[i].pgfrec/pvmGLOB_etime[i];
+ pgin = pvmGLOB_cpu_vminfo[i].pgin/pvmGLOB_etime[i];
+ pgout = pvmGLOB_cpu_vminfo[i].pgout/pvmGLOB_etime[i];
+ dfree = pvmGLOB_cpu_vminfo[i].dfree/pvmGLOB_etime[i];
+
+ hat_fault = pvmGLOB_cpu_vminfo[i].hat_fault/pvmGLOB_etime[i];
+ as_fault = pvmGLOB_cpu_vminfo[i].as_fault/pvmGLOB_etime[i];
+ maj_fault = pvmGLOB_cpu_vminfo[i].maj_fault/pvmGLOB_etime[i];
+ prot_fault = pvmGLOB_cpu_vminfo[i].prot_fault/pvmGLOB_etime[i];
+ cow_fault = pvmGLOB_cpu_vminfo[i].cow_fault/pvmGLOB_etime[i];
+ zfod = pvmGLOB_cpu_vminfo[i].zfod/pvmGLOB_etime[i];
+
+ pages_in = pvmGLOB_cpu_vminfo[i].pgpgin/pvmGLOB_etime[i];
+ pages_out = pvmGLOB_cpu_vminfo[i].pgpgout/pvmGLOB_etime[i];
+ swapins = pvmGLOB_cpu_vminfo[i].swapin/pvmGLOB_etime[i];
+ swapouts = pvmGLOB_cpu_vminfo[i].swapout/pvmGLOB_etime[i];
+ scan = pvmGLOB_cpu_vminfo[i].scan/pvmGLOB_etime[i];
+ pgrrun = pvmGLOB_cpu_vminfo[i].pgrrun/pvmGLOB_etime[i];
+
+ if (updates > 0.0) {
+ freemem = COMPUTE(freemem);
+ swap_avail = COMPUTE(swap_avail);
+
+ runque = (new_sysinfo.runque - old_sysinfo.runque) / updates;
+ waiting = (new_sysinfo.waiting - old_sysinfo.waiting) / updates;
+ swpque = (new_sysinfo.swpque - old_sysinfo.swpque) / updates;
+
+ /* save old memory values */
+ old_sysinfo = new_sysinfo;
+ old_vminfo = new_vminfo;
+ }
+ }
+};
+
+#endif /* _P_VMSTAT_CLASS_SE_ */
Added: trunk/orca/lib/SE/3.3/orca_p_vmstat_class.se
==============================================================================
--- trunk/orca/lib/SE/3.3/orca_p_vmstat_class.se (original)
+++ trunk/orca/lib/SE/3.3/orca_p_vmstat_class.se 2003-01-19 20:24:26.000000000 -0800
@@ -0,0 +1,386 @@
+//
+// Copyright (c) 1993-2001 by Richard Pettit. All rights reserved.
+//
+// Some of this work was derived from include files containing the following
+// copyrights.
+//
+// Copyright (c) 1986-1994 by Sun Microsystems, Inc.
+// Copyright (c) 1983-1989 by AT&T
+// Copyright (c) 1980-1993 by The Regents of the University of California.
+//
+// The work as a whole represents unique intellectual property and is
+// copyright by Richard Pettit as shown on the first line.
+//
+
+#ifndef _P_VMSTAT_CLASS_SE_
+#define _P_VMSTAT_CLASS_SE_
+
+#include <unistd.se>
+#include <kstat.se>
+#include <sysdepend.se>
+
+/* for memory computation */
+#define PGTOK(n) (((n) * pagesize) / 1024)
+#define DELTA(name) (new_vminfo.name - old_vminfo.name)
+#define COMPUTE(name) PGTOK(DELTA(name) / updates)
+
+/* make the code easier on the eye */
+#define CSI_DIFF(name) \
+ pvmGLOB_cpu_sysinfo[i].name = \
+ (csi.name - pvmGLOB_old_cpu_sysinfo[i].name)
+#define CSI_PCT(name) \
+ pvmGLOB_cpu_sysinfo[i].name = (pvmGLOB_cpu_sysinfo[i].name / total) * 100.0
+
+#define CSIPCT(name) (100.0 * name / (hz * pvmGLOB_etime[i]))
+
+#define CVI_DIFF(name) \
+ pvmGLOB_cpu_vminfo[i].name = \
+ (cvi.name - pvmGLOB_old_cpu_vminfo[i].name)
+
+double pvmGLOB_etime[];
+ulong pvmGLOB_old_time[];
+ks_cpu_sysinfo pvmGLOB_cpu_sysinfo[];
+ks_cpu_sysinfo pvmGLOB_old_cpu_sysinfo[];
+ks_cpu_vminfo pvmGLOB_cpu_vminfo[];
+ks_cpu_vminfo pvmGLOB_old_cpu_vminfo[];
+
+int pvmGLOB_cpu_size;
+
+pvmGLOB_realloc()
+{
+ pvmGLOB_etime = renew pvmGLOB_etime [pvmGLOB_cpu_size];
+ pvmGLOB_old_time = renew pvmGLOB_old_time [pvmGLOB_cpu_size];
+ pvmGLOB_cpu_sysinfo = renew pvmGLOB_cpu_sysinfo [pvmGLOB_cpu_size];
+ pvmGLOB_old_cpu_sysinfo = renew pvmGLOB_old_cpu_sysinfo [pvmGLOB_cpu_size];
+ pvmGLOB_cpu_vminfo = renew pvmGLOB_cpu_vminfo [pvmGLOB_cpu_size];
+ pvmGLOB_old_cpu_vminfo = renew pvmGLOB_old_cpu_vminfo [pvmGLOB_cpu_size];
+}
+
+class p_vmstat {
+
+ /* which cpu */
+ int number$;
+ int instance; /* instance number from the kernel */
+ int ncpus; /* current total number of CPUs */
+
+ /* these values are numbers of procs */
+ double runque;
+ double waiting;
+ double swpque;
+
+ /* these values are in Kbytes */
+ int swap_avail;
+ int freemem;
+
+ double pgrec; /* page reclaims (include pageout) */
+ double pgfrec; /* page reclaims from free list */
+ double pgin; /* pageins */
+ double pgout; /* pageouts */
+ double pgswapin; /* pages swapped in */
+ double pgswapout; /* pages swapped out */
+ double dfree; /* pages freed by daemon or auto */
+
+ double hat_fault; /* minor page faults via hat_fault() */
+ double as_fault; /* minor page faults via as_fault() */
+ double maj_fault; /* major page faults */
+ double prot_fault; /* protection faults */
+ double cow_fault; /* copy-on-write faults */
+ double zfod; /* pages zero filled on demand */
+
+ double sysfork; /* forks */
+ double sysvfork; /* vforks */
+ double sysexec; /* execs */
+
+ double namei; /* pathname lookups */
+  double ufsiget;             /* ufs_iget() calls */
+ double ufsdirblk; /* directory block read */
+
+ double ufsinopage; /* inodes taken with no attached pgs */
+
+ /* these values are per second */
+ double pages_in; /* pages paged in */
+ double pages_out; /* pages paged out */
+ double swapins; /* swap-in occurrences */
+ double swapouts; /* swap-out occurrences */
+ double scan; /* pages scanned */
+ double pgrrun; /* how many times did pageout run */
+
+ double smtx; /* sleeps on a mutex per sec - mutex adaptive enter */
+ double interrupts; /* interrupts including clock */
+ double intrthread; /* interrupts as threads (below clock) */
+ double system_calls;
+ double context_switches;
+ double invol_switches;
+ double trap;
+
+ /* these are percentages of total over the last period */
+ double user_time;
+ double system_time;
+ double wait_time;
+ double idle_time;
+
+ p_vmstat$()
+ {
+ int i;
+ int n;
+ double updates;
+ int initialized = 0;
+ int last_number;
+ double hz = sysconf(_SC_CLK_TCK);
+ long pagesize = sysconf(_SC_PAGESIZE);
+ double total;
+ ulong itime;
+ ulong new_time;
+ ks_cpu_sysinfo csi;
+ ks_cpu_vminfo cvi;
+ ks_system_misc kstat$misc;
+ ks_sysinfo kstat$info;
+ ks_sysinfo old_sysinfo;
+ ks_sysinfo new_sysinfo;
+ ks_vminfo kstat$vminfo;
+ ks_vminfo old_vminfo;
+ ks_vminfo new_vminfo;
+
+ // allow this to change each time
+ ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+
+ /* grab initial values */
+ if (initialized == 0) {
+ pvmGLOB_cpu_size = ncpus;
+
+ pvmGLOB_etime = new double[pvmGLOB_cpu_size];
+ pvmGLOB_old_time = new ulong[pvmGLOB_cpu_size];
+ pvmGLOB_cpu_sysinfo = new ks_cpu_sysinfo[pvmGLOB_cpu_size];
+ pvmGLOB_old_cpu_sysinfo = new ks_cpu_sysinfo[pvmGLOB_cpu_size];
+ pvmGLOB_cpu_vminfo = new ks_cpu_vminfo[pvmGLOB_cpu_size];
+ pvmGLOB_old_cpu_vminfo = new ks_cpu_vminfo[pvmGLOB_cpu_size];
+
+ n = kstat$misc.clk_intr;
+ for(i=0; i<ncpus; i++) {
+ csi.number$ = i;
+ refresh$(csi);
+ pvmGLOB_old_cpu_sysinfo[i] = csi;
+ cvi.number$ = i;
+ refresh$(cvi);
+ pvmGLOB_old_cpu_vminfo[i] = cvi;
+ pvmGLOB_old_time[i] = n;
+ pvmGLOB_etime[i] = n / hz;
+ }
+
+ /* memory values */
+ old_vminfo = kstat$vminfo;
+ old_sysinfo = kstat$info;
+
+ initialized = 1;
+ last_number = number$;
+
+ return;
+ }
+
+ // keep up with ncpus
+ if (ncpus > pvmGLOB_cpu_size) {
+ pvmGLOB_cpu_size = ncpus;
+ pvmGLOB_realloc();
+ }
+
+ /* select which cpu */
+ i = number$;
+ if (i < 0 || i >= ncpus) {
+ number$ = -1;
+ return;
+ }
+ instance = pvmGLOB_old_cpu_sysinfo[i].instance$;
+
+ /* how much time has gone by */
+ new_time = kstat$misc.clk_intr;
+ itime = new_time - pvmGLOB_old_time[i];
+
+ /* no time has gone by, return */
+ if (itime == 0) {
+ if (i != last_number) {
+ smtx = pvmGLOB_cpu_sysinfo[i].mutex_adenters/pvmGLOB_etime[i];
+ interrupts = pvmGLOB_cpu_sysinfo[i].intr/pvmGLOB_etime[i];
+ intrthread = pvmGLOB_cpu_sysinfo[i].intrthread/pvmGLOB_etime[i];
+ system_calls = pvmGLOB_cpu_sysinfo[i].syscall/pvmGLOB_etime[i];
+ trap = pvmGLOB_cpu_sysinfo[i].trap/pvmGLOB_etime[i];
+ context_switches = pvmGLOB_cpu_sysinfo[i].pswitch/pvmGLOB_etime[i];
+ invol_switches = pvmGLOB_cpu_sysinfo[i].inv_swtch/pvmGLOB_etime[i];
+ user_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_USER]);
+ system_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_KERNEL]);
+ wait_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_WAIT]);
+ idle_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_IDLE]);
+
+ sysfork = pvmGLOB_cpu_sysinfo[i].sysfork/pvmGLOB_etime[i];
+ sysvfork = pvmGLOB_cpu_sysinfo[i].sysvfork/pvmGLOB_etime[i];
+ sysexec = pvmGLOB_cpu_sysinfo[i].sysexec/pvmGLOB_etime[i];
+
+ namei = pvmGLOB_cpu_sysinfo[i].namei/pvmGLOB_etime[i];
+ ufsiget = pvmGLOB_cpu_sysinfo[i].ufsiget/pvmGLOB_etime[i];
+ ufsdirblk = pvmGLOB_cpu_sysinfo[i].ufsdirblk/pvmGLOB_etime[i];
+
+ ufsinopage = pvmGLOB_cpu_sysinfo[i].ufsinopage/pvmGLOB_etime[i];
+
+ pgrec = pvmGLOB_cpu_vminfo[i].pgrec/pvmGLOB_etime[i];
+ pgfrec = pvmGLOB_cpu_vminfo[i].pgfrec/pvmGLOB_etime[i];
+ pgin = pvmGLOB_cpu_vminfo[i].pgin/pvmGLOB_etime[i];
+ pgout = pvmGLOB_cpu_vminfo[i].pgout/pvmGLOB_etime[i];
+ dfree = pvmGLOB_cpu_vminfo[i].dfree/pvmGLOB_etime[i];
+
+ hat_fault = pvmGLOB_cpu_vminfo[i].hat_fault/pvmGLOB_etime[i];
+ as_fault = pvmGLOB_cpu_vminfo[i].as_fault/pvmGLOB_etime[i];
+ maj_fault = pvmGLOB_cpu_vminfo[i].maj_fault/pvmGLOB_etime[i];
+ prot_fault = pvmGLOB_cpu_vminfo[i].prot_fault/pvmGLOB_etime[i];
+ cow_fault = pvmGLOB_cpu_vminfo[i].cow_fault/pvmGLOB_etime[i];
+ zfod = pvmGLOB_cpu_vminfo[i].zfod/pvmGLOB_etime[i];
+
+ pages_in = pvmGLOB_cpu_vminfo[i].pgpgin/pvmGLOB_etime[i];
+ pages_out = pvmGLOB_cpu_vminfo[i].pgpgout/pvmGLOB_etime[i];
+ swapins = pvmGLOB_cpu_vminfo[i].swapin/pvmGLOB_etime[i];
+ swapouts = pvmGLOB_cpu_vminfo[i].swapout/pvmGLOB_etime[i];
+ scan = pvmGLOB_cpu_vminfo[i].scan/pvmGLOB_etime[i];
+ pgrrun = pvmGLOB_cpu_vminfo[i].pgrrun/pvmGLOB_etime[i];
+
+ last_number = i;
+ }
+ return;
+ }
+ pvmGLOB_etime[i] = itime / hz;
+ pvmGLOB_old_time[i] = new_time;
+
+ csi.number$ = i;
+ refresh$(csi);
+
+ cvi.number$ = i;
+ refresh$(cvi);
+
+ new_sysinfo = kstat$info;
+ updates = new_sysinfo.updates - old_sysinfo.updates;
+ if (updates > 0.0) {
+ new_vminfo = kstat$vminfo;
+ }
+
+ /* compute cpu sysinfo diffs */
+ CSI_DIFF(mutex_adenters);
+ CSI_DIFF(intr);
+ CSI_DIFF(intrthread);
+ CSI_DIFF(syscall);
+ CSI_DIFF(pswitch);
+ CSI_DIFF(inv_swtch);
+ CSI_DIFF(trap);
+ CSI_DIFF(cpu[CPU_USER]);
+ CSI_DIFF(cpu[CPU_KERNEL]);
+ CSI_DIFF(cpu[CPU_WAIT]);
+ CSI_DIFF(cpu[CPU_IDLE]);
+ CSI_DIFF(wait[W_IO]);
+ CSI_DIFF(wait[W_SWAP]);
+ CSI_DIFF(wait[W_PIO]);
+
+ CSI_DIFF(sysfork);
+ CSI_DIFF(sysvfork);
+ CSI_DIFF(sysexec);
+
+ CSI_DIFF(namei);
+ CSI_DIFF(ufsiget);
+ CSI_DIFF(ufsdirblk);
+
+ CSI_DIFF(ufsinopage);
+
+ /* compute percentages
+ total = pvmGLOB_cpu_sysinfo[i].cpu[CPU_USER] +
+ pvmGLOB_cpu_sysinfo[i].cpu[CPU_KERNEL] +
+ pvmGLOB_cpu_sysinfo[i].cpu[CPU_WAIT] +
+ pvmGLOB_cpu_sysinfo[i].cpu[CPU_IDLE];
+ CSI_PCT(cpu[CPU_USER]);
+ CSI_PCT(cpu[CPU_KERNEL]);
+ CSI_PCT(cpu[CPU_WAIT]);
+ CSI_PCT(cpu[CPU_IDLE]);
+ */
+
+ /* save new values */
+ pvmGLOB_old_cpu_sysinfo[i] = csi;
+
+ CVI_DIFF(pgrec);
+ CVI_DIFF(pgfrec);
+ CVI_DIFF(pgin);
+ CVI_DIFF(pgout);
+ CVI_DIFF(dfree);
+
+ CVI_DIFF(hat_fault);
+ CVI_DIFF(as_fault);
+ CVI_DIFF(maj_fault);
+ CVI_DIFF(prot_fault);
+ CVI_DIFF(zfod);
+ CVI_DIFF(cow_fault);
+
+ /* compute page/swap values */
+ pvmGLOB_cpu_vminfo[i].pgpgin =
+ ((cvi.pgpgin - pvmGLOB_old_cpu_vminfo[i].pgpgin) * pagesize) / 1024;
+ pvmGLOB_cpu_vminfo[i].pgpgout =
+ ((cvi.pgpgout - pvmGLOB_old_cpu_vminfo[i].pgpgout) * pagesize) / 1024;
+ CVI_DIFF(swapin);
+ CVI_DIFF(swapout);
+ CVI_DIFF(scan);
+ CVI_DIFF(pgrrun);
+
+ /* save new values */
+ pvmGLOB_old_cpu_vminfo[i] = cvi;
+
+ /* update and return */
+ smtx = pvmGLOB_cpu_sysinfo[i].mutex_adenters/pvmGLOB_etime[i];
+ interrupts = pvmGLOB_cpu_sysinfo[i].intr/pvmGLOB_etime[i];
+ intrthread = pvmGLOB_cpu_sysinfo[i].intrthread/pvmGLOB_etime[i];
+ system_calls = pvmGLOB_cpu_sysinfo[i].syscall/pvmGLOB_etime[i];
+ context_switches = pvmGLOB_cpu_sysinfo[i].pswitch/pvmGLOB_etime[i];
+ invol_switches = pvmGLOB_cpu_sysinfo[i].inv_swtch/pvmGLOB_etime[i];
+ trap = pvmGLOB_cpu_sysinfo[i].trap/pvmGLOB_etime[i];
+ user_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_USER]);
+ system_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_KERNEL]);
+ wait_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_WAIT]);
+ idle_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_IDLE]);
+
+ sysfork = pvmGLOB_cpu_sysinfo[i].sysfork/pvmGLOB_etime[i];
+ sysvfork = pvmGLOB_cpu_sysinfo[i].sysvfork/pvmGLOB_etime[i];
+ sysexec = pvmGLOB_cpu_sysinfo[i].sysexec/pvmGLOB_etime[i];
+
+ namei = pvmGLOB_cpu_sysinfo[i].namei/pvmGLOB_etime[i];
+ ufsiget = pvmGLOB_cpu_sysinfo[i].ufsiget/pvmGLOB_etime[i];
+ ufsdirblk = pvmGLOB_cpu_sysinfo[i].ufsdirblk/pvmGLOB_etime[i];
+
+ ufsinopage = pvmGLOB_cpu_sysinfo[i].ufsinopage/pvmGLOB_etime[i];
+
+ pgrec = pvmGLOB_cpu_vminfo[i].pgrec/pvmGLOB_etime[i];
+ pgfrec = pvmGLOB_cpu_vminfo[i].pgfrec/pvmGLOB_etime[i];
+ pgin = pvmGLOB_cpu_vminfo[i].pgin/pvmGLOB_etime[i];
+ pgout = pvmGLOB_cpu_vminfo[i].pgout/pvmGLOB_etime[i];
+ dfree = pvmGLOB_cpu_vminfo[i].dfree/pvmGLOB_etime[i];
+
+ hat_fault = pvmGLOB_cpu_vminfo[i].hat_fault/pvmGLOB_etime[i];
+ as_fault = pvmGLOB_cpu_vminfo[i].as_fault/pvmGLOB_etime[i];
+ maj_fault = pvmGLOB_cpu_vminfo[i].maj_fault/pvmGLOB_etime[i];
+ prot_fault = pvmGLOB_cpu_vminfo[i].prot_fault/pvmGLOB_etime[i];
+ cow_fault = pvmGLOB_cpu_vminfo[i].cow_fault/pvmGLOB_etime[i];
+ zfod = pvmGLOB_cpu_vminfo[i].zfod/pvmGLOB_etime[i];
+
+ pages_in = pvmGLOB_cpu_vminfo[i].pgpgin/pvmGLOB_etime[i];
+ pages_out = pvmGLOB_cpu_vminfo[i].pgpgout/pvmGLOB_etime[i];
+ swapins = pvmGLOB_cpu_vminfo[i].swapin/pvmGLOB_etime[i];
+ swapouts = pvmGLOB_cpu_vminfo[i].swapout/pvmGLOB_etime[i];
+ scan = pvmGLOB_cpu_vminfo[i].scan/pvmGLOB_etime[i];
+ pgrrun = pvmGLOB_cpu_vminfo[i].pgrrun/pvmGLOB_etime[i];
+
+ if (updates > 0.0) {
+ freemem = COMPUTE(freemem);
+ swap_avail = COMPUTE(swap_avail);
+
+ runque = (new_sysinfo.runque - old_sysinfo.runque) / updates;
+ waiting = (new_sysinfo.waiting - old_sysinfo.waiting) / updates;
+ swpque = (new_sysinfo.swpque - old_sysinfo.swpque) / updates;
+
+ /* save old memory values */
+ old_sysinfo = new_sysinfo;
+ old_vminfo = new_vminfo;
+ }
+ }
+};
+
+#endif /* _P_VMSTAT_CLASS_SE_ */
Modified: trunk/orca/data_gatherers/orcallator/orcallator.cfg.in
==============================================================================
--- trunk/orca/data_gatherers/orcallator/orcallator.cfg.in (original)
+++ trunk/orca/data_gatherers/orcallator/orcallator.cfg.in 2003-01-19 20:24:26.000000000 -0800
@@ -533,6 +533,57 @@
}
plot {
+title %g Interrupts/s, System Calls/s & Traps/s
+source orcallator
+data interrupts/s
+data intrthread/s
+data system_calls/s
+data traps/s
+line_type area
+line_type line1
+line_type line1
+line_type line1
+legend Interrupts
+legend Interrupts as threads
+legend System Calls
+legend Traps
+y_legend calls/s
+data_min 0
+}
+
+plot {
+title %g Forks/s, Vforks/s & Execs/s
+source orcallator
+data forks/s
+data vforks/s
+data execs/s
+line_type area
+line_type line1
+line_type line1
+legend forks/s
+legend vforks/s
+legend execs/s
+y_legend calls/s
+data_min 0
+}
+
+plot {
+title %g Context Switches/s
+source orcallator
+data context_switches/s - invol_switches/s
+data invol_switches/s
+data context_switches/s
+line_type area
+line_type stack
+line_type line1
+legend Voluntary Switches/s
+legend Involuntary Switches/s
+legend Total
+y_legend Switches/s
+data_min 0
+}
+
+plot {
title %g NFS Server Call Rate
source orcallator
data nfss_calls
@@ -737,11 +788,30 @@
}
plot {
+title %g Pathname lookups, UFS iget calls & UFS dir. block reads/s
+source orcallator
+data namei/s
+data ufsiget/s
+data ufsdirblk/s
+line_type area
+line_type line1
+line_type line1
+legend namei/s
+legend ufsiget/s
+legend ufsdirblk/s
+y_legend calls/s
+data_min 0
+}
+
+plot {
title %g Cache Inode Steal Rate
source orcallator
data inod_stl/s
+data ufsinopage/s
line_type area
+line_type line1
legend Inode w/page steals/s
+legend Inode wo/page steals/s
y_legend Steals/s
data_min 0
href http://www.orcaware.com/orca/docs/orcallator.html#inode_steal_rate
@@ -818,6 +888,68 @@
}
plot {
+title %g Pageins/s & Pageouts/s
+source orcallator
+data pgin/s
+data pgout/s
+line_type area
+line_type line1
+legend Page Ins
+legend Page Outs
+y_legend Events/s
+data_min 0
+plot_min 0
+}
+
+plot {
+title %g Pages PagedIn & Pages PagedOut & Pages Freed/s
+source orcallator
+data pages_in/s
+data pages_out/s
+data dfree/s
+line_type area
+line_type line1
+line_type line1
+legend Pages Paged In
+legend Pages Paged Out
+legend Pages Freed
+y_legend Pages/s
+data_min 0
+plot_min 0
+}
+
+plot {
+title %g Major, Minor & Protection Page Faults/s
+source orcallator
+data maj_fault/s
+data min_fault/s
+data prot_fault/s
+line_type area
+line_type line1
+line_type line1
+legend Major Faults
+legend Minor Faults
+legend Protection Faults
+y_legend Faults/s
+data_min 0
+plot_min 0
+}
+
+plot {
+title %g Copy-on-write & Zero-fill-on-demand Page Faults/s
+source orcallator
+data cow_fault/s
+data zfod/s
+line_type area
+line_type line1
+legend Copy-on-write
+legend Zero-fill-on-demand
+y_legend Faults/s
+data_min 0
+plot_min 0
+}
+
+plot {
title %g Memory Pages Locked & IO
source orcallator
data pageslock
Modified: trunk/orca/data_gatherers/orcallator/orcallator.se
==============================================================================
--- trunk/orca/data_gatherers/orcallator/orcallator.se (original)
+++ trunk/orca/data_gatherers/orcallator/orcallator.se 2003-01-19 20:24:26.000000000 -0800
@@ -8,6 +8,38 @@
//
// Portions copied from percollator.se written by Adrian Cockroft.
//
+// Version 1.38: Jan 14, 2003 Additions by Sean O'Neill
+// Added several new variables for Orca to collect
+// and graph including:
+// pgrec/s - page reclaims (include pageout)
+// pgfrec/s - page reclaims from free list
+// pgin/s - pageins
+// pages_in/s - pages paged in
+// pgout/s - pageouts
+// pages_out/s - pages paged out
+// dfree/s - pages freed by daemon or auto
+// min_fault/s - minor page faults
+// (pvm.hat_fault + pvm.as_fault)
+// maj_fault/s - major page faults
+// prot_fault/s - protection faults
+// cow_fault/s - copy-on-write faults
+// zfod/s - zero-fill-on-demand faults
+// interrupts/s - interrupts including clock
+// intrthread/s - interrupts as threads
+// (below clock)
+// system_calls/s
+// context_switches/s
+// invol_switches/s - involuntary context switches
+// traps/s
+// forks/s
+// vforks/s
+// execs/s
+// namei/s - pathname lookups
+// ufsiget/s - ufs_iget() calls
+// ufsdirblk/s - directory blocks read
+// ufsinopage/s - inodes taken with no attached
+// pages
+//
// Version 1.37: Dec 28, 2002 Improved algorithm in raw_disk_map() which
// detects the end of the GLOBAL_disk_info array.
// To deal with occasions when the first disk is a
@@ -339,7 +371,8 @@
#include <p_iostat_class.se>
#include <p_netstat_class.se>
-#include <p_vmstat_class.se>
+//#include <p_vmstat_class.se>
+#include <orca_p_vmstat_class.se>
#include <pure_rules.se>
#include <live_rules.se>
#include <mib.se>
@@ -1523,6 +1556,31 @@
put_output("#waiting", sprintf("%8.2f", pvm.waiting + 0.0));
put_output(" #swpque", sprintf("%8.2f", pvm.swpque + 0.0));
put_output("scanrate", sprintf("%8.3f", pvm.scan + 0.0));
+ put_output("pgrec/s", sprintf("%8.3f", pvm.pgrec + 0.0));
+ put_output("pgfrec/s", sprintf("%8.3f", pvm.pgfrec + 0.0));
+ put_output("pgin/s", sprintf("%8.3f", pvm.pgin + 0.0));
+ put_output("pages_in/s", sprintf("%8.3f", pvm.pages_in + 0.0));
+ put_output("pgout/s", sprintf("%8.3f", pvm.pgout + 0.0));
+ put_output("pages_out/s", sprintf("%8.3f", pvm.pages_out + 0.0));
+ put_output("dfree/s", sprintf("%8.3f", pvm.dfree + 0.0));
+ put_output("min_fault/s", sprintf("%8.3f", pvm.hat_fault + pvm.as_fault + 0.0));
+ put_output("maj_fault/s", sprintf("%8.3f", pvm.maj_fault + 0.0));
+ put_output("prot_fault/s", sprintf("%8.3f", pvm.prot_fault + 0.0));
+ put_output("cow_fault/s", sprintf("%8.3f", pvm.cow_fault + 0.0));
+ put_output("zfod/s", sprintf("%8.3f", pvm.zfod + 0.0));
+ put_output("interrupts/s", sprintf("%8.3f", pvm.interrupts + 0.0));
+ put_output("intrthread/s", sprintf("%8.3f", pvm.intrthread + 0.0));
+ put_output("system_calls/s", sprintf("%8.3f", pvm.system_calls + 0.0));
+ put_output("context_switches/s", sprintf("%8.3f", pvm.context_switches + 0.0));
+ put_output("invol_switches/s", sprintf("%8.3f", pvm.invol_switches + 0.0));
+ put_output("traps/s", sprintf("%8.3f", pvm.trap + 0.0));
+ put_output("forks/s", sprintf("%8.3f", pvm.sysfork + 0.0));
+ put_output("vforks/s", sprintf("%8.3f", pvm.sysvfork + 0.0));
+ put_output("execs/s", sprintf("%8.3f", pvm.sysexec + 0.0));
+ put_output("namei/s", sprintf("%8.3f", pvm.namei + 0.0));
+ put_output("ufsiget/s", sprintf("%8.3f", pvm.ufsiget + 0.0));
+ put_output("ufsdirblk/s", sprintf("%8.3f", pvm.ufsdirblk + 0.0));
// Calculate the rate of new process spawning.
if (can_read_kernel != 0) {
@@ -1893,9 +1951,13 @@
#ifdef WATCH_INODE
measure_inode()
{
+ p_vmstat pvm;
+
put_output("inod_ref/s", sprintf("%10.3f", tmp_lr_inode.refrate));
put_output("inod_hit%", sprintf("%9.3f", tmp_lr_inode.hitrate));
put_output("inod_stl/s", sprintf("%10.3f", tmp_lr_inode.iprate));
+ put_output("ufsinopage/s", sprintf("%8.3f", pvm.ufsinopage + 0.0));
+
}
#endif
Modified: trunk/orca/data_gatherers/orcallator/start_orcallator.sh.in
==============================================================================
--- trunk/orca/data_gatherers/orcallator/start_orcallator.sh.in (original)
+++ trunk/orca/data_gatherers/orcallator/start_orcallator.sh.in 2003-01-19 20:24:26.000000000 -0800
@@ -13,6 +13,12 @@
UNAME=@UNAME@
RAW_ORCALLATOR_DIR=@VAR_DIR@/orcallator
SE=@SE@
+SE_VERSION=`$SE -version | nawk '{print $4}'`
+if [ "$SE_VERSION" != "3.2.1" -a "$SE_VERSION" != "3.3" ]; then
+ echo "SE version MUST be 3.3 or 3.2.1 .. please upgrade SE."
+ echo "See http://www.setoolkit.com for more information."
+ exit 1
+fi
# WEB_LOG contains the location of the web server log file that
# orcallator.se should read.
@@ -103,7 +109,7 @@
# Now start the logging.
echo "Starting logging"
-nohup $SE $SE_PATCHES -DWATCH_OS $WATCH_WEB $libdir/orcallator.se &
+nohup $SE $SE_PATCHES -DWATCH_OS $WATCH_WEB -I$libdir/SE/$SE_VERSION $libdir/orcallator.se &
# Write the PID of orcallator to a file to make killing easier.
pid=$!
More information about the Orca-checkins
mailing list