[Orca-dev] [PATCH] Updates to include additional Solaris performance metrics for graphing
Sean O'Neill
sean at seanoneill.info
Tue Nov 19 19:34:01 PST 2002
Additions to Orca to collect and graph the following metrics under SE 3.2.1
(Solaris 8 and below) and SE 3.3 (Solaris 9); a sample of the resulting output
columns is sketched after the list:
pgrec - page reclaims (includes pageout)
pgfrec - page reclaims from free list
pgin - pageins
pages_in - pages paged in
pgout - pageouts
pages_out - pages paged out
dfree - pages freed by daemon or auto
min_fault - minor page faults (pvm.hat_fault + pvm.as_fault)
maj_fault - major page faults
prot_fault - protection faults
cow_fault - copy-on-write faults
zfod - zero-fill-on-demand faults
interrupts - interrupts including clock
intrthread - interrupts as threads (below clock)
system_calls
context_switches
invol_switches - involuntary context switches
trap
forks
vforks
execs
namei - pathname lookups
ufsiget - ufs_iget() calls
ufsdirblk - directory blocks read
ufsinopage - inodes taken with no attached pages
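For context only (not part of the patch): orcallator.se emits each metric
through put_output(), so the orcallator data files simply gain one column per
metric above. A data line picks up fields roughly like the following; the
column names match the put_output() calls in the diff, but the values shown
here are made up:

  pgrec  pgfrec    pgin pages_in   pgout pages_out   dfree  min_fault ...
  0.133   0.133   0.267    2.133   0.000     0.000   0.000     12.400 ...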
Changes made to:
* orcallator/orcallator.cfg.in: additions to graph the metrics listed above
* orcallator/orcallator.se: additions to collect the metrics listed above
* orcallator/start_orcallator.sh.in: updates to identify the version of the
  SE toolkit in use so that the matching version of orca_p_vmstat_class.se is
  included (see the shell sketch after this list).
* lib/Makefile.in: updates to allow for installation of the two versions of
orca_p_vmstat_class.se
* lib/Orca/Constants.pm: changed the hourly graph so that it covers 3 hours of
  data instead of 1.5 hours (36 data points instead of 18 at the 5-minute
  sample interval).
* lib/3.2.1/orca_p_vmstat_class.se: new file
* lib/3.3/orca_p_vmstat_class.se: new file
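For reference, a minimal sketch of the version dispatch that
start_orcallator.sh.in now performs. The se path and install prefix below are
illustrative assumptions; only the -version/nawk field and the -D/-I flags are
taken from the patch itself:

  #!/bin/sh
  # Sketch only: pick the per-SE-version copy of orca_p_vmstat_class.se.
  SE=/opt/RICHPse/bin/se                 # assumed SE toolkit location
  libdir=/usr/local/lib/orca             # assumed Orca library directory
  # Assumes `se -version` prints the version number in its fourth field.
  SE_VERSION=`$SE -version | nawk '{print $4}'`
  # orcallator.se now includes <orca_p_vmstat_class.se>, so the matching
  # per-version directory installed by lib/Makefile.in must be on the path.
  nohup $SE -DWATCH_OS -I$libdir/$SE_VERSION $libdir/orcallator.se &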
SVN Diff for updates to Subversion rev 165 follows:
Index: ./orcallator/orcallator.cfg.in
===================================================================
--- ./orcallator/orcallator.cfg.in
+++ ./orcallator/orcallator.cfg.in Tue Nov 19 14:21:48 2002
@@ -533,6 +533,54 @@
}
plot {
+title %g Interrupts, System Calls, Traps
+source orcallator
+data interrupts
+data intrthread
+data system_calls
+data trap
+line_type area
+line_type line1
+line_type line1
+line_type line1
+legend Interrupts/s
+legend ThreadInterrupts/s
+legend System Calls/s
+legend Traps/s
+data_min 0
+}
+
+plot {
+title %g Forks, Vforks, & Execs
+source orcallator
+data forks
+data vforks
+data execs
+line_type area
+line_type line1
+line_type line1
+legend forks/s
+legend vforks/s
+legend execs/s
+data_min 0
+}
+
+plot {
+title %g Context Switches
+source orcallator
+data context_switches - invol_switches
+data invol_switches
+data context_switches
+line_type area
+line_type stack
+line_type line1
+legend Voluntary Switches/s
+legend Involuntary Switches/s
+legend Total
+data_min 0
+}
+
+plot {
title %g NFS Server Call Rate
source orcallator
data nfss_calls
@@ -696,6 +744,21 @@
}
plot {
+title %g namei/s, iget/s, dirblk/s
+source orcallator
+data namei
+data ufsiget
+data ufsdirblk
+line_type area
+line_type line1
+line_type line1
+legend namei/s
+legend iget/s
+legend dirblk/s
+data_min 0
+}
+
+plot {
title %g Cache Inode Steal Rate
source orcallator
data inod_stl/s
@@ -774,6 +837,68 @@
color ff0000
color 0000ff
href http://www.orcaware.com/orca/docs/orcallator.html#page_usage
+}
+
+plot {
+title %g Pageins/s, Pageouts/s, PagesFreed/s
+source orcallator
+data pgin
+data pgout
+data dfree
+line_type area
+line_type line1
+line_type line1
+legend PageIns/s
+legend PageOuts/s
+legend PagesFreed/s
+y_legend Pages/s
+data_min 0
+plot_min 0
+}
+
+plot {
+title %g Pages PagedIn/s & Pages PagedOut/s
+source orcallator
+data pages_in
+data pages_out
+line_type area
+line_type line1
+legend PageIns/s
+legend PageOuts/s
+y_legend Pages/s
+data_min 0
+plot_min 0
+}
+
+plot {
+title %g Major & Minor & Protection Page Faults
+source orcallator
+data maj_fault
+data min_fault
+data prot_fault
+line_type area
+line_type line1
+line_type line1
+legend Major
+legend Minor
+legend Protection
+y_legend Faults/s
+data_min 0
+plot_min 0
+}
+
+plot {
+title %g COW & ZFOD Page Faults
+source orcallator
+data cow_fault
+data zfod
+line_type area
+line_type line1
+legend Copy-on-write
+legend Zero-fill-on-demand
+y_legend Faults/s
+data_min 0
+plot_min 0
}
plot {
Index: ./orcallator/orcallator.se
===================================================================
--- ./orcallator/orcallator.se
+++ ./orcallator/orcallator.se Thu Oct 3 21:05:37 2002
@@ -8,6 +8,35 @@
//
// Portions copied from percollator.se written by Adrian Cockroft.
//
+// Version 1.36: Sep 05, 2002 Additions by Sean O'Neill
+// Added several new variables for Orca to collect
+// and graph including:
+// pgrec - page reclaims (include pageout)
+// pgfrec - page reclaims from free list
+// pgin - pageins
+// pages_in - pages paged in
+// pgout - pageouts
+// pages_out - pages paged out
+// dfree - pages freed by daemon or auto
+// min_fault - minor page faults (pvm.hat_fault + pvm.as_fault)
+// maj_fault - major page faults
+// prot_fault - protection faults
+// cow_fault - copy-on-write faults
+// zfod - zero-fill-on-demand faults
+// interrupts - interrupts including clock
+// intrthread - interrupts as threads (below clock)
+// system_calls
+// context_switches
+// invol_switches - involuntary context switches
+// trap
+// forks
+// vforks
+// execs
+// namei - pathname lookups
+// ufsiget - ufs_iget() calls
+// ufsdirblk - directory blocks read
+// ufsinopage - inodes taken with no attached pages
+//
// Version 1.35: Aug 11, 2002 Add a new measurement, the number of secure web
// server processes on the system using the
// column name #httpsds. If the environmental
@@ -324,7 +353,7 @@
#include <p_iostat_class.se>
#include <p_netstat_class.se>
-#include <p_vmstat_class.se>
+#include <orca_p_vmstat_class.se>
#include <pure_rules.se>
#include <live_rules.se>
#include <mib.se>
@@ -1447,6 +1476,31 @@
put_output("#waiting", sprintf("%8.2f", pvm.waiting + 0.0));
put_output(" #swpque", sprintf("%8.2f", pvm.swpque + 0.0));
put_output("scanrate", sprintf("%8.3f", pvm.scan + 0.0));
+ put_output("pgrec", sprintf("%8.3f", pvm.pgrec + 0.0));
+ put_output("pgfrec", sprintf("%8.3f", pvm.pgfrec + 0.0));
+ put_output("pgin", sprintf("%8.3f", pvm.pgin + 0.0));
+ put_output("pages_in", sprintf("%8.3f", pvm.pages_in + 0.0));
+ put_output("pgout", sprintf("%8.3f", pvm.pgout + 0.0));
+ put_output("pages_out", sprintf("%8.3f", pvm.pages_out + 0.0));
+ put_output("dfree", sprintf("%8.3f", pvm.dfree + 0.0));
+ put_output("min_fault", sprintf("%8.3f", pvm.hat_fault + pvm.as_fault +
0.0))
+;
+ put_output("maj_fault", sprintf("%8.3f", pvm.maj_fault + 0.0));
+ put_output("prot_fault", sprintf("%8.3f", pvm.prot_fault + 0.0));
+ put_output("cow_fault", sprintf("%8.3f", pvm.cow_fault + 0.0));
+ put_output("zfod", sprintf("%8.3f", pvm.zfod + 0.0));
+ put_output("interrupts", sprintf("%8.3f", pvm.interrupts + 0.0));
+ put_output("intrthread", sprintf("%8.3f", pvm.intrthread + 0.0));
+ put_output("system_calls", sprintf("%8.3f", pvm.system_calls + 0.0));
+ put_output("context_switches", sprintf("%8.3f", pvm.context_switches +
0.0));
+ put_output("invol_switches", sprintf("%8.3f", pvm.invol_switches + 0.0));
+ put_output("trap", sprintf("%8.3f", pvm.trap + 0.0));
+ put_output("forks", sprintf("%8.3f", pvm.sysfork + 0.0));
+ put_output("vforks", sprintf("%8.3f", pvm.sysvfork + 0.0));
+ put_output("execs", sprintf("%8.3f", pvm.sysexec + 0.0));
+ put_output("namei", sprintf("%8.3f", pvm.namei + 0.0));
+ put_output("ufsiget", sprintf("%8.3f", pvm.ufsiget + 0.0));
+ put_output("ufsdirblk", sprintf("%8.3f", pvm.ufsdirblk + 0.0));
// Calculate the rate of new process spawning.
if (can_read_kernel != 0) {
@@ -1784,9 +1838,13 @@
#ifdef WATCH_INODE
measure_inode()
{
+ p_vmstat pvm;
+
put_output("inod_ref/s", sprintf("%10.3f", tmp_lr_inode.refrate));
put_output("inod_hit%", sprintf("%9.3f", tmp_lr_inode.hitrate));
put_output("inod_stl/s", sprintf("%10.3f", tmp_lr_inode.iprate));
+ put_output("ufsinopage", sprintf("%8.3f", pvm.ufsinopage + 0.0));
+
}
#endif
Index: ./orcallator/start_orcallator.sh.in
===================================================================
--- ./orcallator/start_orcallator.sh.in
+++ ./orcallator/start_orcallator.sh.in Tue Nov 19 14:21:49 2002
@@ -13,6 +13,7 @@
UNAME=@UNAME@
ORCALLATOR_DIR=@VAR_DIR@/orcallator
SE=@SE@
+SE_VERSION=`$SE -version | nawk '{print $4}'`
# WEB_LOG contains the location of the web server log file that
# orcallator.se should read.
@@ -103,7 +104,7 @@
# Now start the logging.
echo "Starting logging"
-nohup $SE $SE_PATCHES -DWATCH_OS $WATCH_WEB $libdir/orcallator.se &
+nohup $SE $SE_PATCHES -DWATCH_OS $WATCH_WEB -I$libdir/$SE_VERSION $libdir/orcallator.se &
# Write the PID of orcallator to a file to make killing easier.
pid=$!
Index: ./lib/Makefile.in
===================================================================
--- ./lib/Makefile.in
+++ ./lib/Makefile.in Tue Nov 19 16:31:36 2002
@@ -40,6 +40,17 @@
done; \
fi
+ $(MKDIR) $(libdir)/3.2.1
+ @for f in 3.2.1/*; do \
+ $(INSTALL) -m 0644 $$f $(libdir)/3.2.1; \
+ echo $(INSTALL) -m 0644 $$f $(libdir)/3.2.1; \
+ done
+ $(MKDIR) $(libdir)/3.3
+ @for f in 3.3/*; do \
+ $(INSTALL) -m 0644 $$f $(libdir)/3.3; \
+ echo $(INSTALL) -m 0644 $$f $(libdir)/3.3; \
+ done
+
clean:
distclean: clean
Index: ./lib/Orca/Constants.pm
===================================================================
--- ./lib/Orca/Constants.pm
+++ ./lib/Orca/Constants.pm Tue Nov 19 16:32:38 2002
@@ -75,7 +75,7 @@
BEGIN {
@CONST_IMAGE_PLOT_TYPES = qw(hourly daily weekly monthly quarterly yearly);
%CONST_IMAGE_PLOT_INFO =
- ('hourly' => [$RRA_PDP_COUNTS[0], 1.5*60*60], # 18 data points
+ ('hourly' => [$RRA_PDP_COUNTS[0], 3*60*60], # 36 data points
'daily' => [$RRA_PDP_COUNTS[0], 1.5*DAY_SECONDS], # 432 data points
'weekly' => [$RRA_PDP_COUNTS[1], 10 *DAY_SECONDS], # 480 data points
'monthly' => [$RRA_PDP_COUNTS[2], 40 *DAY_SECONDS], # 480 data points
Index: ./lib/3.2.1/orca_p_vmstat_class.se
===================================================================
--- ./lib/3.2.1/orca_p_vmstat_class.se
+++ ./lib/3.2.1/orca_p_vmstat_class.se Thu Oct 3 20:41:12 2002
@@ -0,0 +1,386 @@
+//
+// Copyright (c) 1993-2001 by Richard Pettit. All rights reserved.
+//
+// Some of this work was derived from include files containing the following
+// copyrights.
+//
+// Copyright (c) 1986-1994 by Sun Microsystems, Inc.
+// Copyright (c) 1983-1989 by AT&T
+// Copyright (c) 1980-1993 by The Regents of the University of California.
+//
+// The work as a whole represents unique intellectual property and is
+// copyright by Richard Pettit as shown on the first line.
+//
+
+#ifndef _P_VMSTAT_CLASS_SE_
+#define _P_VMSTAT_CLASS_SE_
+
+#include <unistd.se>
+#include <kstat.se>
+#include <sysdepend.se>
+
+/* for memory computation */
+#define PGTOK(n) (((n) * pagesize) / 1024)
+#define DELTA(name) (new_vminfo.name - old_vminfo.name)
+#define COMPUTE(name) PGTOK(DELTA(name) / updates)
+
+/* make the code easier on the eye */
+#define CSI_DIFF(name) \
+ pvmGLOB_cpu_sysinfo[i].name = \
+ (csi.name - pvmGLOB_old_cpu_sysinfo[i].name)
+#define CSI_PCT(name) \
+ pvmGLOB_cpu_sysinfo[i].name = (pvmGLOB_cpu_sysinfo[i].name / total) * 100.0
+
+#define CSIPCT(name) (100.0 * name / (hz * pvmGLOB_etime[i]))
+
+#define CVI_DIFF(name) \
+ pvmGLOB_cpu_vminfo[i].name = \
+ (cvi.name - pvmGLOB_old_cpu_vminfo[i].name)
+
+double pvmGLOB_etime[];
+ulong pvmGLOB_old_time[];
+ks_cpu_sysinfo pvmGLOB_cpu_sysinfo[];
+ks_cpu_sysinfo pvmGLOB_old_cpu_sysinfo[];
+ks_cpu_vminfo pvmGLOB_cpu_vminfo[];
+ks_cpu_vminfo pvmGLOB_old_cpu_vminfo[];
+
+int pvmGLOB_cpu_size;
+
+pvmGLOB_realloc()
+{
+ pvmGLOB_etime = renew pvmGLOB_etime [pvmGLOB_cpu_size];
+ pvmGLOB_old_time = renew pvmGLOB_old_time [pvmGLOB_cpu_size];
+ pvmGLOB_cpu_sysinfo = renew pvmGLOB_cpu_sysinfo [pvmGLOB_cpu_size];
+ pvmGLOB_old_cpu_sysinfo = renew pvmGLOB_old_cpu_sysinfo [pvmGLOB_cpu_size];
+ pvmGLOB_cpu_vminfo = renew pvmGLOB_cpu_vminfo [pvmGLOB_cpu_size];
+ pvmGLOB_old_cpu_vminfo = renew pvmGLOB_old_cpu_vminfo [pvmGLOB_cpu_size];
+}
+
+class p_vmstat {
+
+ /* which cpu */
+ int number$;
+ int instance; /* instance number from the kernel */
+ int ncpus; /* current total number of CPUs */
+
+ /* these values are numbers of procs */
+ double runque;
+ double waiting;
+ double swpque;
+
+ /* these values are in Kbytes */
+ int swap_avail;
+ int freemem;
+
+ double pgrec; /* page reclaims (include pageout) */
+ double pgfrec; /* page reclaims from free list */
+ double pgin; /* pageins */
+ double pgout; /* pageouts */
+ double pgswapin; /* pages swapped in */
+ double pgswapout; /* pages swapped out */
+ double dfree; /* pages freed by daemon or auto */
+
+ double hat_fault; /* minor page faults via hat_fault() */
+ double as_fault; /* minor page faults via as_fault() */
+ double maj_fault; /* major page faults */
+ double prot_fault; /* protection faults */
+ double cow_fault; /* copy-on-write faults */
+ double zfod; /* pages zero filled on demand */
+
+ double sysfork; /* forks */
+ double sysvfork; /* vforks */
+ double sysexec; /* execs */
+
+ double namei; /* pathname lookups */
+ double ufsiget; /* ufs_iget() calls */
+ double ufsdirblk; /* directory blocks read */
+
+ double ufsinopage; /* inodes taken with no attached pgs */
+
+ /* these values are per second */
+ double pages_in; /* pages paged in */
+ double pages_out; /* pages paged out */
+ double swapins; /* swap-in occurrences */
+ double swapouts; /* swap-out occurrences */
+ double scan; /* pages scanned */
+ double pgrrun; /* how many times did pageout run */
+
+ double smtx; /* sleeps on a mutex per sec - mutex adaptive enter */
+ double interrupts; /* interrupts including clock */
+ double intrthread; /* interrupts as threads (below clock) */
+ double system_calls;
+ double context_switches;
+ double invol_switches;
+ double trap;
+
+ /* these are percentages of total over the last period */
+ double user_time;
+ double system_time;
+ double wait_time;
+ double idle_time;
+
+ p_vmstat$()
+ {
+ int i;
+ int n;
+ double updates;
+ int initialized = 0;
+ int last_number;
+ double hz = sysconf(_SC_CLK_TCK);
+ long pagesize = sysconf(_SC_PAGESIZE);
+ double total;
+ ulong itime;
+ ulong new_time;
+ ks_cpu_sysinfo csi;
+ ks_cpu_vminfo cvi;
+ ks_system_misc kstat$misc;
+ ks_sysinfo kstat$info;
+ ks_sysinfo old_sysinfo;
+ ks_sysinfo new_sysinfo;
+ ks_vminfo kstat$vminfo;
+ ks_vminfo old_vminfo;
+ ks_vminfo new_vminfo;
+
+ // allow this to change each time
+ ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+
+ /* grab initial values */
+ if (initialized == 0) {
+ pvmGLOB_cpu_size = ncpus;
+
+ pvmGLOB_etime = new double[pvmGLOB_cpu_size];
+ pvmGLOB_old_time = new ulong[pvmGLOB_cpu_size];
+ pvmGLOB_cpu_sysinfo = new ks_cpu_sysinfo[pvmGLOB_cpu_size];
+ pvmGLOB_old_cpu_sysinfo = new ks_cpu_sysinfo[pvmGLOB_cpu_size];
+ pvmGLOB_cpu_vminfo = new ks_cpu_vminfo[pvmGLOB_cpu_size];
+ pvmGLOB_old_cpu_vminfo = new ks_cpu_vminfo[pvmGLOB_cpu_size];
+
+ n = kstat$misc.clk_intr;
+ for(i=0; i<ncpus; i++) {
+ csi.number$ = i;
+ refresh$(csi);
+ pvmGLOB_old_cpu_sysinfo[i] = csi;
+ cvi.number$ = i;
+ refresh$(cvi);
+ pvmGLOB_old_cpu_vminfo[i] = cvi;
+ pvmGLOB_old_time[i] = n;
+ pvmGLOB_etime[i] = n / hz;
+ }
+
+ /* memory values */
+ old_vminfo = kstat$vminfo;
+ old_sysinfo = kstat$info;
+
+ initialized = 1;
+ last_number = number$;
+
+ return;
+ }
+
+ // keep up with ncpus
+ if (ncpus > pvmGLOB_cpu_size) {
+ pvmGLOB_cpu_size = ncpus;
+ pvmGLOB_realloc();
+ }
+
+ /* select which cpu */
+ i = number$;
+ if (i < 0 || i >= ncpus) {
+ number$ = -1;
+ return;
+ }
+ instance = pvmGLOB_old_cpu_sysinfo[i].instance;
+
+ /* how much time has gone by */
+ new_time = kstat$misc.clk_intr;
+ itime = new_time - pvmGLOB_old_time[i];
+
+ /* no time has gone by, return */
+ if (itime == 0) {
+ if (i != last_number) {
+ smtx = pvmGLOB_cpu_sysinfo[i].mutex_adenters/pvmGLOB_etime[i];
+ interrupts = pvmGLOB_cpu_sysinfo[i].intr/pvmGLOB_etime[i];
+ intrthread = pvmGLOB_cpu_sysinfo[i].intrthread/pvmGLOB_etime[i];
+ system_calls = pvmGLOB_cpu_sysinfo[i].syscall/pvmGLOB_etime[i];
+ trap = pvmGLOB_cpu_sysinfo[i].trap/pvmGLOB_etime[i];
+ context_switches = pvmGLOB_cpu_sysinfo[i].pswitch/pvmGLOB_etime[i];
+ invol_switches = pvmGLOB_cpu_sysinfo[i].inv_swtch/pvmGLOB_etime[i];
+ user_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_USER]);
+ system_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_KERNEL]);
+ wait_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_WAIT]);
+ idle_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_IDLE]);
+
+ sysfork = pvmGLOB_cpu_sysinfo[i].sysfork/pvmGLOB_etime[i];
+ sysvfork = pvmGLOB_cpu_sysinfo[i].sysvfork/pvmGLOB_etime[i];
+ sysexec = pvmGLOB_cpu_sysinfo[i].sysexec/pvmGLOB_etime[i];
+
+ namei = pvmGLOB_cpu_sysinfo[i].namei/pvmGLOB_etime[i];
+ ufsiget = pvmGLOB_cpu_sysinfo[i].ufsiget/pvmGLOB_etime[i];
+ ufsdirblk = pvmGLOB_cpu_sysinfo[i].ufsdirblk/pvmGLOB_etime[i];
+
+ ufsinopage = pvmGLOB_cpu_sysinfo[i].ufsinopage/pvmGLOB_etime[i];
+
+ pgrec = pvmGLOB_cpu_vminfo[i].pgrec/pvmGLOB_etime[i];
+ pgfrec = pvmGLOB_cpu_vminfo[i].pgfrec/pvmGLOB_etime[i];
+ pgin = pvmGLOB_cpu_vminfo[i].pgin/pvmGLOB_etime[i];
+ pgout = pvmGLOB_cpu_vminfo[i].pgout/pvmGLOB_etime[i];
+ dfree = pvmGLOB_cpu_vminfo[i].dfree/pvmGLOB_etime[i];
+
+ hat_fault = pvmGLOB_cpu_vminfo[i].hat_fault/pvmGLOB_etime[i];
+ as_fault = pvmGLOB_cpu_vminfo[i].as_fault/pvmGLOB_etime[i];
+ maj_fault = pvmGLOB_cpu_vminfo[i].maj_fault/pvmGLOB_etime[i];
+ prot_fault = pvmGLOB_cpu_vminfo[i].prot_fault/pvmGLOB_etime[i];
+ cow_fault = pvmGLOB_cpu_vminfo[i].cow_fault/pvmGLOB_etime[i];
+ zfod = pvmGLOB_cpu_vminfo[i].zfod/pvmGLOB_etime[i];
+
+ pages_in = pvmGLOB_cpu_vminfo[i].pgpgin/pvmGLOB_etime[i];
+ pages_out = pvmGLOB_cpu_vminfo[i].pgpgout/pvmGLOB_etime[i];
+ swapins = pvmGLOB_cpu_vminfo[i].swapin/pvmGLOB_etime[i];
+ swapouts = pvmGLOB_cpu_vminfo[i].swapout/pvmGLOB_etime[i];
+ scan = pvmGLOB_cpu_vminfo[i].scan/pvmGLOB_etime[i];
+ pgrrun = pvmGLOB_cpu_vminfo[i].pgrrun/pvmGLOB_etime[i];
+
+ last_number = i;
+ }
+ return;
+ }
+ pvmGLOB_etime[i] = itime / hz;
+ pvmGLOB_old_time[i] = new_time;
+
+ csi.number$ = i;
+ refresh$(csi);
+
+ cvi.number$ = i;
+ refresh$(cvi);
+
+ new_sysinfo = kstat$info;
+ updates = new_sysinfo.updates - old_sysinfo.updates;
+ if (updates > 0.0) {
+ new_vminfo = kstat$vminfo;
+ }
+
+ /* compute cpu sysinfo diffs */
+ CSI_DIFF(mutex_adenters);
+ CSI_DIFF(intr);
+ CSI_DIFF(intrthread);
+ CSI_DIFF(syscall);
+ CSI_DIFF(pswitch);
+ CSI_DIFF(inv_swtch);
+ CSI_DIFF(trap);
+ CSI_DIFF(cpu[CPU_USER]);
+ CSI_DIFF(cpu[CPU_KERNEL]);
+ CSI_DIFF(cpu[CPU_WAIT]);
+ CSI_DIFF(cpu[CPU_IDLE]);
+ CSI_DIFF(wait[W_IO]);
+ CSI_DIFF(wait[W_SWAP]);
+ CSI_DIFF(wait[W_PIO]);
+
+ CSI_DIFF(sysfork);
+ CSI_DIFF(sysvfork);
+ CSI_DIFF(sysexec);
+
+ CSI_DIFF(namei);
+ CSI_DIFF(ufsiget);
+ CSI_DIFF(ufsdirblk);
+
+ CSI_DIFF(ufsinopage);
+
+ /* compute percentages
+ total = pvmGLOB_cpu_sysinfo[i].cpu[CPU_USER] +
+ pvmGLOB_cpu_sysinfo[i].cpu[CPU_KERNEL] +
+ pvmGLOB_cpu_sysinfo[i].cpu[CPU_WAIT] +
+ pvmGLOB_cpu_sysinfo[i].cpu[CPU_IDLE];
+ CSI_PCT(cpu[CPU_USER]);
+ CSI_PCT(cpu[CPU_KERNEL]);
+ CSI_PCT(cpu[CPU_WAIT]);
+ CSI_PCT(cpu[CPU_IDLE]);
+ */
+
+ /* save new values */
+ pvmGLOB_old_cpu_sysinfo[i] = csi;
+
+ CVI_DIFF(pgrec);
+ CVI_DIFF(pgfrec);
+ CVI_DIFF(pgin);
+ CVI_DIFF(pgout);
+ CVI_DIFF(dfree);
+
+ CVI_DIFF(hat_fault);
+ CVI_DIFF(as_fault);
+ CVI_DIFF(maj_fault);
+ CVI_DIFF(prot_fault);
+ CVI_DIFF(zfod);
+ CVI_DIFF(cow_fault);
+
+ /* compute page/swap values */
+ pvmGLOB_cpu_vminfo[i].pgpgin =
+ ((cvi.pgpgin - pvmGLOB_old_cpu_vminfo[i].pgpgin) * pagesize) / 1024;
+ pvmGLOB_cpu_vminfo[i].pgpgout =
+ ((cvi.pgpgout - pvmGLOB_old_cpu_vminfo[i].pgpgout) * pagesize) / 1024;
+ CVI_DIFF(swapin);
+ CVI_DIFF(swapout);
+ CVI_DIFF(scan);
+ CVI_DIFF(pgrrun);
+
+ /* save new values */
+ pvmGLOB_old_cpu_vminfo[i] = cvi;
+
+ /* update and return */
+ smtx = pvmGLOB_cpu_sysinfo[i].mutex_adenters/pvmGLOB_etime[i];
+ interrupts = pvmGLOB_cpu_sysinfo[i].intr/pvmGLOB_etime[i];
+ intrthread = pvmGLOB_cpu_sysinfo[i].intrthread/pvmGLOB_etime[i];
+ system_calls = pvmGLOB_cpu_sysinfo[i].syscall/pvmGLOB_etime[i];
+ context_switches = pvmGLOB_cpu_sysinfo[i].pswitch/pvmGLOB_etime[i];
+ invol_switches = pvmGLOB_cpu_sysinfo[i].inv_swtch/pvmGLOB_etime[i];
+ trap = pvmGLOB_cpu_sysinfo[i].trap/pvmGLOB_etime[i];
+ user_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_USER]);
+ system_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_KERNEL]);
+ wait_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_WAIT]);
+ idle_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_IDLE]);
+
+ sysfork = pvmGLOB_cpu_sysinfo[i].sysfork/pvmGLOB_etime[i];
+ sysvfork = pvmGLOB_cpu_sysinfo[i].sysvfork/pvmGLOB_etime[i];
+ sysexec = pvmGLOB_cpu_sysinfo[i].sysexec/pvmGLOB_etime[i];
+
+ namei = pvmGLOB_cpu_sysinfo[i].namei/pvmGLOB_etime[i];
+ ufsiget = pvmGLOB_cpu_sysinfo[i].ufsiget/pvmGLOB_etime[i];
+ ufsdirblk = pvmGLOB_cpu_sysinfo[i].ufsdirblk/pvmGLOB_etime[i];
+
+ ufsinopage = pvmGLOB_cpu_sysinfo[i].ufsinopage/pvmGLOB_etime[i];
+
+ pgrec = pvmGLOB_cpu_vminfo[i].pgrec/pvmGLOB_etime[i];
+ pgfrec = pvmGLOB_cpu_vminfo[i].pgfrec/pvmGLOB_etime[i];
+ pgin = pvmGLOB_cpu_vminfo[i].pgin/pvmGLOB_etime[i];
+ pgout = pvmGLOB_cpu_vminfo[i].pgout/pvmGLOB_etime[i];
+ dfree = pvmGLOB_cpu_vminfo[i].dfree/pvmGLOB_etime[i];
+
+ hat_fault = pvmGLOB_cpu_vminfo[i].hat_fault/pvmGLOB_etime[i];
+ as_fault = pvmGLOB_cpu_vminfo[i].as_fault/pvmGLOB_etime[i];
+ maj_fault = pvmGLOB_cpu_vminfo[i].maj_fault/pvmGLOB_etime[i];
+ prot_fault = pvmGLOB_cpu_vminfo[i].prot_fault/pvmGLOB_etime[i];
+ cow_fault = pvmGLOB_cpu_vminfo[i].cow_fault/pvmGLOB_etime[i];
+ zfod = pvmGLOB_cpu_vminfo[i].zfod/pvmGLOB_etime[i];
+
+ pages_in = pvmGLOB_cpu_vminfo[i].pgpgin/pvmGLOB_etime[i];
+ pages_out = pvmGLOB_cpu_vminfo[i].pgpgout/pvmGLOB_etime[i];
+ swapins = pvmGLOB_cpu_vminfo[i].swapin/pvmGLOB_etime[i];
+ swapouts = pvmGLOB_cpu_vminfo[i].swapout/pvmGLOB_etime[i];
+ scan = pvmGLOB_cpu_vminfo[i].scan/pvmGLOB_etime[i];
+ pgrrun = pvmGLOB_cpu_vminfo[i].pgrrun/pvmGLOB_etime[i];
+
+ if (updates > 0.0) {
+ freemem = COMPUTE(freemem);
+ swap_avail = COMPUTE(swap_avail);
+
+ runque = (new_sysinfo.runque - old_sysinfo.runque) / updates;
+ waiting = (new_sysinfo.waiting - old_sysinfo.waiting) / updates;
+ swpque = (new_sysinfo.swpque - old_sysinfo.swpque) / updates;
+
+ /* save old memory values */
+ old_sysinfo = new_sysinfo;
+ old_vminfo = new_vminfo;
+ }
+ }
+};
+
+#endif /* _P_VMSTAT_CLASS_SE_ */
Index: ./lib/3.3/orca_p_vmstat_class.se
===================================================================
--- ./lib/3.3/orca_p_vmstat_class.se
+++ ./lib/3.3/orca_p_vmstat_class.se Thu Oct 3 20:41:19 2002
@@ -0,0 +1,386 @@
+//
+// Copyright (c) 1993-2001 by Richard Pettit. All rights reserved.
+//
+// Some of this work was derived from include files containing the following
+// copyrights.
+//
+// Copyright (c) 1986-1994 by Sun Microsystems, Inc.
+// Copyright (c) 1983-1989 by AT&T
+// Copyright (c) 1980-1993 by The Regents of the University of California.
+//
+// The work as a whole represents unique intellectual property and is
+// copyright by Richard Pettit as shown on the first line.
+//
+
+#ifndef _P_VMSTAT_CLASS_SE_
+#define _P_VMSTAT_CLASS_SE_
+
+#include <unistd.se>
+#include <kstat.se>
+#include <sysdepend.se>
+
+/* for memory computation */
+#define PGTOK(n) (((n) * pagesize) / 1024)
+#define DELTA(name) (new_vminfo.name - old_vminfo.name)
+#define COMPUTE(name) PGTOK(DELTA(name) / updates)
+
+/* make the code easier on the eye */
+#define CSI_DIFF(name) \
+ pvmGLOB_cpu_sysinfo[i].name = \
+ (csi.name - pvmGLOB_old_cpu_sysinfo[i].name)
+#define CSI_PCT(name) \
+ pvmGLOB_cpu_sysinfo[i].name = (pvmGLOB_cpu_sysinfo[i].name / total) * 100.0
+
+#define CSIPCT(name) (100.0 * name / (hz * pvmGLOB_etime[i]))
+
+#define CVI_DIFF(name) \
+ pvmGLOB_cpu_vminfo[i].name = \
+ (cvi.name - pvmGLOB_old_cpu_vminfo[i].name)
+
+double pvmGLOB_etime[];
+ulong pvmGLOB_old_time[];
+ks_cpu_sysinfo pvmGLOB_cpu_sysinfo[];
+ks_cpu_sysinfo pvmGLOB_old_cpu_sysinfo[];
+ks_cpu_vminfo pvmGLOB_cpu_vminfo[];
+ks_cpu_vminfo pvmGLOB_old_cpu_vminfo[];
+
+int pvmGLOB_cpu_size;
+
+pvmGLOB_realloc()
+{
+ pvmGLOB_etime = renew pvmGLOB_etime [pvmGLOB_cpu_size];
+ pvmGLOB_old_time = renew pvmGLOB_old_time [pvmGLOB_cpu_size];
+ pvmGLOB_cpu_sysinfo = renew pvmGLOB_cpu_sysinfo [pvmGLOB_cpu_size];
+ pvmGLOB_old_cpu_sysinfo = renew pvmGLOB_old_cpu_sysinfo [pvmGLOB_cpu_size];
+ pvmGLOB_cpu_vminfo = renew pvmGLOB_cpu_vminfo [pvmGLOB_cpu_size];
+ pvmGLOB_old_cpu_vminfo = renew pvmGLOB_old_cpu_vminfo [pvmGLOB_cpu_size];
+}
+
+class p_vmstat {
+
+ /* which cpu */
+ int number$;
+ int instance; /* instance number from the kernel */
+ int ncpus; /* current total number of CPUs */
+
+ /* these values are numbers of procs */
+ double runque;
+ double waiting;
+ double swpque;
+
+ /* these values are in Kbytes */
+ int swap_avail;
+ int freemem;
+
+ double pgrec; /* page reclaims (include pageout) */
+ double pgfrec; /* page reclaims from free list */
+ double pgin; /* pageins */
+ double pgout; /* pageouts */
+ double pgswapin; /* pages swapped in */
+ double pgswapout; /* pages swapped out */
+ double dfree; /* pages freed by daemon or auto */
+
+ double hat_fault; /* minor page faults via hat_fault() */
+ double as_fault; /* minor page faults via as_fault() */
+ double maj_fault; /* major page faults */
+ double prot_fault; /* protection faults */
+ double cow_fault; /* copy-on-write faults */
+ double zfod; /* pages zero filled on demand */
+
+ double sysfork; /* forks */
+ double sysvfork; /* vforks */
+ double sysexec; /* execs */
+
+ double namei; /* pathname lookups */
+ double ufsiget; /* ufs_iget() calls */
+ double ufsdirblk; /* directory blocks read */
+
+ double ufsinopage; /* inodes taken with no attached pgs */
+
+ /* these values are per second */
+ double pages_in; /* pages paged in */
+ double pages_out; /* pages paged out */
+ double swapins; /* swap-in occurrences */
+ double swapouts; /* swap-out occurrences */
+ double scan; /* pages scanned */
+ double pgrrun; /* how many times did pageout run */
+
+ double smtx; /* sleeps on a mutex per sec - mutex adaptive enter */
+ double interrupts; /* interrupts including clock */
+ double intrthread; /* interrupts as threads (below clock) */
+ double system_calls;
+ double context_switches;
+ double invol_switches;
+ double trap;
+
+ /* these are percentages of total over the last period */
+ double user_time;
+ double system_time;
+ double wait_time;
+ double idle_time;
+
+ p_vmstat$()
+ {
+ int i;
+ int n;
+ double updates;
+ int initialized = 0;
+ int last_number;
+ double hz = sysconf(_SC_CLK_TCK);
+ long pagesize = sysconf(_SC_PAGESIZE);
+ double total;
+ ulong itime;
+ ulong new_time;
+ ks_cpu_sysinfo csi;
+ ks_cpu_vminfo cvi;
+ ks_system_misc kstat$misc;
+ ks_sysinfo kstat$info;
+ ks_sysinfo old_sysinfo;
+ ks_sysinfo new_sysinfo;
+ ks_vminfo kstat$vminfo;
+ ks_vminfo old_vminfo;
+ ks_vminfo new_vminfo;
+
+ // allow this to change each time
+ ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+
+ /* grab initial values */
+ if (initialized == 0) {
+ pvmGLOB_cpu_size = ncpus;
+
+ pvmGLOB_etime = new double[pvmGLOB_cpu_size];
+ pvmGLOB_old_time = new ulong[pvmGLOB_cpu_size];
+ pvmGLOB_cpu_sysinfo = new ks_cpu_sysinfo[pvmGLOB_cpu_size];
+ pvmGLOB_old_cpu_sysinfo = new ks_cpu_sysinfo[pvmGLOB_cpu_size];
+ pvmGLOB_cpu_vminfo = new ks_cpu_vminfo[pvmGLOB_cpu_size];
+ pvmGLOB_old_cpu_vminfo = new ks_cpu_vminfo[pvmGLOB_cpu_size];
+
+ n = kstat$misc.clk_intr;
+ for(i=0; i<ncpus; i++) {
+ csi.number$ = i;
+ refresh$(csi);
+ pvmGLOB_old_cpu_sysinfo[i] = csi;
+ cvi.number$ = i;
+ refresh$(cvi);
+ pvmGLOB_old_cpu_vminfo[i] = cvi;
+ pvmGLOB_old_time[i] = n;
+ pvmGLOB_etime[i] = n / hz;
+ }
+
+ /* memory values */
+ old_vminfo = kstat$vminfo;
+ old_sysinfo = kstat$info;
+
+ initialized = 1;
+ last_number = number$;
+
+ return;
+ }
+
+ // keep up with ncpus
+ if (ncpus > pvmGLOB_cpu_size) {
+ pvmGLOB_cpu_size = ncpus;
+ pvmGLOB_realloc();
+ }
+
+ /* select which cpu */
+ i = number$;
+ if (i < 0 || i >= ncpus) {
+ number$ = -1;
+ return;
+ }
+ instance = pvmGLOB_old_cpu_sysinfo[i].instance$;
+
+ /* how much time has gone by */
+ new_time = kstat$misc.clk_intr;
+ itime = new_time - pvmGLOB_old_time[i];
+
+ /* no time has gone by, return */
+ if (itime == 0) {
+ if (i != last_number) {
+ smtx = pvmGLOB_cpu_sysinfo[i].mutex_adenters/pvmGLOB_etime[i];
+ interrupts = pvmGLOB_cpu_sysinfo[i].intr/pvmGLOB_etime[i];
+ intrthread = pvmGLOB_cpu_sysinfo[i].intrthread/pvmGLOB_etime[i];
+ system_calls = pvmGLOB_cpu_sysinfo[i].syscall/pvmGLOB_etime[i];
+ trap = pvmGLOB_cpu_sysinfo[i].trap/pvmGLOB_etime[i];
+ context_switches = pvmGLOB_cpu_sysinfo[i].pswitch/pvmGLOB_etime[i];
+ invol_switches = pvmGLOB_cpu_sysinfo[i].inv_swtch/pvmGLOB_etime[i];
+ user_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_USER]);
+ system_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_KERNEL]);
+ wait_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_WAIT]);
+ idle_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_IDLE]);
+
+ sysfork = pvmGLOB_cpu_sysinfo[i].sysfork/pvmGLOB_etime[i];
+ sysvfork = pvmGLOB_cpu_sysinfo[i].sysvfork/pvmGLOB_etime[i];
+ sysexec = pvmGLOB_cpu_sysinfo[i].sysexec/pvmGLOB_etime[i];
+
+ namei = pvmGLOB_cpu_sysinfo[i].namei/pvmGLOB_etime[i];
+ ufsiget = pvmGLOB_cpu_sysinfo[i].ufsiget/pvmGLOB_etime[i];
+ ufsdirblk = pvmGLOB_cpu_sysinfo[i].ufsdirblk/pvmGLOB_etime[i];
+
+ ufsinopage = pvmGLOB_cpu_sysinfo[i].ufsinopage/pvmGLOB_etime[i];
+
+ pgrec = pvmGLOB_cpu_vminfo[i].pgrec/pvmGLOB_etime[i];
+ pgfrec = pvmGLOB_cpu_vminfo[i].pgfrec/pvmGLOB_etime[i];
+ pgin = pvmGLOB_cpu_vminfo[i].pgin/pvmGLOB_etime[i];
+ pgout = pvmGLOB_cpu_vminfo[i].pgout/pvmGLOB_etime[i];
+ dfree = pvmGLOB_cpu_vminfo[i].dfree/pvmGLOB_etime[i];
+
+ hat_fault = pvmGLOB_cpu_vminfo[i].hat_fault/pvmGLOB_etime[i];
+ as_fault = pvmGLOB_cpu_vminfo[i].as_fault/pvmGLOB_etime[i];
+ maj_fault = pvmGLOB_cpu_vminfo[i].maj_fault/pvmGLOB_etime[i];
+ prot_fault = pvmGLOB_cpu_vminfo[i].prot_fault/pvmGLOB_etime[i];
+ cow_fault = pvmGLOB_cpu_vminfo[i].cow_fault/pvmGLOB_etime[i];
+ zfod = pvmGLOB_cpu_vminfo[i].zfod/pvmGLOB_etime[i];
+
+ pages_in = pvmGLOB_cpu_vminfo[i].pgpgin/pvmGLOB_etime[i];
+ pages_out = pvmGLOB_cpu_vminfo[i].pgpgout/pvmGLOB_etime[i];
+ swapins = pvmGLOB_cpu_vminfo[i].swapin/pvmGLOB_etime[i];
+ swapouts = pvmGLOB_cpu_vminfo[i].swapout/pvmGLOB_etime[i];
+ scan = pvmGLOB_cpu_vminfo[i].scan/pvmGLOB_etime[i];
+ pgrrun = pvmGLOB_cpu_vminfo[i].pgrrun/pvmGLOB_etime[i];
+
+ last_number = i;
+ }
+ return;
+ }
+ pvmGLOB_etime[i] = itime / hz;
+ pvmGLOB_old_time[i] = new_time;
+
+ csi.number$ = i;
+ refresh$(csi);
+
+ cvi.number$ = i;
+ refresh$(cvi);
+
+ new_sysinfo = kstat$info;
+ updates = new_sysinfo.updates - old_sysinfo.updates;
+ if (updates > 0.0) {
+ new_vminfo = kstat$vminfo;
+ }
+
+ /* compute cpu sysinfo diffs */
+ CSI_DIFF(mutex_adenters);
+ CSI_DIFF(intr);
+ CSI_DIFF(intrthread);
+ CSI_DIFF(syscall);
+ CSI_DIFF(pswitch);
+ CSI_DIFF(inv_swtch);
+ CSI_DIFF(trap);
+ CSI_DIFF(cpu[CPU_USER]);
+ CSI_DIFF(cpu[CPU_KERNEL]);
+ CSI_DIFF(cpu[CPU_WAIT]);
+ CSI_DIFF(cpu[CPU_IDLE]);
+ CSI_DIFF(wait[W_IO]);
+ CSI_DIFF(wait[W_SWAP]);
+ CSI_DIFF(wait[W_PIO]);
+
+ CSI_DIFF(sysfork);
+ CSI_DIFF(sysvfork);
+ CSI_DIFF(sysexec);
+
+ CSI_DIFF(namei);
+ CSI_DIFF(ufsiget);
+ CSI_DIFF(ufsdirblk);
+
+ CSI_DIFF(ufsinopage);
+
+ /* compute percentages
+ total = pvmGLOB_cpu_sysinfo[i].cpu[CPU_USER] +
+ pvmGLOB_cpu_sysinfo[i].cpu[CPU_KERNEL] +
+ pvmGLOB_cpu_sysinfo[i].cpu[CPU_WAIT] +
+ pvmGLOB_cpu_sysinfo[i].cpu[CPU_IDLE];
+ CSI_PCT(cpu[CPU_USER]);
+ CSI_PCT(cpu[CPU_KERNEL]);
+ CSI_PCT(cpu[CPU_WAIT]);
+ CSI_PCT(cpu[CPU_IDLE]);
+ */
+
+ /* save new values */
+ pvmGLOB_old_cpu_sysinfo[i] = csi;
+
+ CVI_DIFF(pgrec);
+ CVI_DIFF(pgfrec);
+ CVI_DIFF(pgin);
+ CVI_DIFF(pgout);
+ CVI_DIFF(dfree);
+
+ CVI_DIFF(hat_fault);
+ CVI_DIFF(as_fault);
+ CVI_DIFF(maj_fault);
+ CVI_DIFF(prot_fault);
+ CVI_DIFF(zfod);
+ CVI_DIFF(cow_fault);
+
+ /* compute page/swap values */
+ pvmGLOB_cpu_vminfo[i].pgpgin =
+ ((cvi.pgpgin - pvmGLOB_old_cpu_vminfo[i].pgpgin) * pagesize) / 1024;
+ pvmGLOB_cpu_vminfo[i].pgpgout =
+ ((cvi.pgpgout - pvmGLOB_old_cpu_vminfo[i].pgpgout) * pagesize) / 1024;
+ CVI_DIFF(swapin);
+ CVI_DIFF(swapout);
+ CVI_DIFF(scan);
+ CVI_DIFF(pgrrun);
+
+ /* save new values */
+ pvmGLOB_old_cpu_vminfo[i] = cvi;
+
+ /* update and return */
+ smtx = pvmGLOB_cpu_sysinfo[i].mutex_adenters/pvmGLOB_etime[i];
+ interrupts = pvmGLOB_cpu_sysinfo[i].intr/pvmGLOB_etime[i];
+ intrthread = pvmGLOB_cpu_sysinfo[i].intrthread/pvmGLOB_etime[i];
+ system_calls = pvmGLOB_cpu_sysinfo[i].syscall/pvmGLOB_etime[i];
+ context_switches = pvmGLOB_cpu_sysinfo[i].pswitch/pvmGLOB_etime[i];
+ invol_switches = pvmGLOB_cpu_sysinfo[i].inv_swtch/pvmGLOB_etime[i];
+ trap = pvmGLOB_cpu_sysinfo[i].trap/pvmGLOB_etime[i];
+ user_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_USER]);
+ system_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_KERNEL]);
+ wait_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_WAIT]);
+ idle_time = CSIPCT(pvmGLOB_cpu_sysinfo[i].cpu[CPU_IDLE]);
+
+ sysfork = pvmGLOB_cpu_sysinfo[i].sysfork/pvmGLOB_etime[i];
+ sysvfork = pvmGLOB_cpu_sysinfo[i].sysvfork/pvmGLOB_etime[i];
+ sysexec = pvmGLOB_cpu_sysinfo[i].sysexec/pvmGLOB_etime[i];
+
+ namei = pvmGLOB_cpu_sysinfo[i].namei/pvmGLOB_etime[i];
+ ufsiget = pvmGLOB_cpu_sysinfo[i].ufsiget/pvmGLOB_etime[i];
+ ufsdirblk = pvmGLOB_cpu_sysinfo[i].ufsdirblk/pvmGLOB_etime[i];
+
+ ufsinopage = pvmGLOB_cpu_sysinfo[i].ufsinopage/pvmGLOB_etime[i];
+
+ pgrec = pvmGLOB_cpu_vminfo[i].pgrec/pvmGLOB_etime[i];
+ pgfrec = pvmGLOB_cpu_vminfo[i].pgfrec/pvmGLOB_etime[i];
+ pgin = pvmGLOB_cpu_vminfo[i].pgin/pvmGLOB_etime[i];
+ pgout = pvmGLOB_cpu_vminfo[i].pgout/pvmGLOB_etime[i];
+ dfree = pvmGLOB_cpu_vminfo[i].dfree/pvmGLOB_etime[i];
+
+ hat_fault = pvmGLOB_cpu_vminfo[i].hat_fault/pvmGLOB_etime[i];
+ as_fault = pvmGLOB_cpu_vminfo[i].as_fault/pvmGLOB_etime[i];
+ maj_fault = pvmGLOB_cpu_vminfo[i].maj_fault/pvmGLOB_etime[i];
+ prot_fault = pvmGLOB_cpu_vminfo[i].prot_fault/pvmGLOB_etime[i];
+ cow_fault = pvmGLOB_cpu_vminfo[i].cow_fault/pvmGLOB_etime[i];
+ zfod = pvmGLOB_cpu_vminfo[i].zfod/pvmGLOB_etime[i];
+
+ pages_in = pvmGLOB_cpu_vminfo[i].pgpgin/pvmGLOB_etime[i];
+ pages_out = pvmGLOB_cpu_vminfo[i].pgpgout/pvmGLOB_etime[i];
+ swapins = pvmGLOB_cpu_vminfo[i].swapin/pvmGLOB_etime[i];
+ swapouts = pvmGLOB_cpu_vminfo[i].swapout/pvmGLOB_etime[i];
+ scan = pvmGLOB_cpu_vminfo[i].scan/pvmGLOB_etime[i];
+ pgrrun = pvmGLOB_cpu_vminfo[i].pgrrun/pvmGLOB_etime[i];
+
+ if (updates > 0.0) {
+ freemem = COMPUTE(freemem);
+ swap_avail = COMPUTE(swap_avail);
+
+ runque = (new_sysinfo.runque - old_sysinfo.runque) / updates;
+ waiting = (new_sysinfo.waiting - old_sysinfo.waiting) / updates;
+ swpque = (new_sysinfo.swpque - old_sysinfo.swpque) / updates;
+
+ /* save old memory values */
+ old_sysinfo = new_sysinfo;
+ old_vminfo = new_vminfo;
+ }
+ }
+};
+
+#endif /* _P_VMSTAT_CLASS_SE_ */
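To try the changes, the diff above applies from the top of a rev 165 working
copy with patch -p0; the patch file name and the build step below are
illustrative and assume the usual Orca configure/make flow:

  cd orca                              # top of the Subversion rev 165 checkout
  patch -p0 < orca-new-metrics.patch
  ./configure && make install          # regenerates files derived from the *.in templates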
--
........................................................
......... ..- -. .. -..- .-. ..- .-.. . ... ............
.-- .. -. -... .-.. --- .-- ... -.. .-. --- --- .-.. ...
Sean O'Neill