Logo

Thin Clients : Benchmark 

WebServerTest.pl

#!/usr/bin/perl
#
# Simple test script to get some idea of performance of a web server.
# Spawns one or more processes and retrieves one or more web pages from the
# remote server.  Times how long it takes.
#
# Derived from: torture.pl written by Lincoln D Stein in 1999
#               see: http://stein.cshl.org/~lstein/torture/
#
#   WebServerTest.pl
#   Author  : David Parkinson
#   Version : 1.0
#   Date    : 30th August 2010

use strict;
use warnings;
use LWP;
use Time::HiRes 'time','sleep';
use IO::Pipe;
use POSIX 'WNOHANG';
use Getopt::Std;

# Usage message
# Usage message, printed (via die) on bad or missing arguments.
my $USAGE = <<USAGE;
 Usage: $0 -[options] (URL | FILE)
    Simple test to check the performance of a web server.
  
  Options:
    -t <integer>  Number of times to run the test        [1]
    -c <integer>  Number of copies of program to run     [1]
    -d <integer>  Mean delay (ms) between serial accesses [0]
    -h <name>     Name or IP address of host             Overrides any entry in URL or FILE

   Any FILE starts with the name of the host followed by a list of paths to be checked
   relative to the root directory.
   eg.
   www.parkytowers.me.uk
   thin/
   thin/EvoT20/Linux.shtml
   ....
USAGE

my $VERSION = '1.00';

# Parse the command-line switches; Getopt::Std populates the $opt_*
# package variables declared here.
our ($opt_t, $opt_c, $opt_d, $opt_h);
die $USAGE unless getopts('t:c:d:h:');

# Pick up the run parameters, applying a default wherever the
# corresponding switch was not supplied.
my $FILE = shift;
die $USAGE unless $FILE;
my $TIMES  = $opt_t ? $opt_t : 1;
my $COPIES = $opt_c ? $opt_c : 1;
my $DELAY  = $opt_d ? $opt_d : 0;
my $HOST   = $opt_h ? $opt_h : "";

# Timestamp for the start-of-run banner.
my $localtime = localtime();

print "** WebServerTest.pl version $VERSION starting at $localtime\n";

# Command line either specifies a FILE or a single URL.
# If it is a file then we read it and stuff the URLs in <@urls>

my ($host,$url,@urls);
if( $FILE=~/^http:/ ) {
    # parse out the URL in a simple fashion
    ($host,$url) = $FILE =~ m!^http://([^/]+)(.*)!;
    $host .= ':80' unless $host=~/:\d+$/;
} else {
    # Lexical filehandle + 3-arg open (bareword TARGET replaced).
    # First line of the file is the host; remaining lines are paths
    # relative to the document root (chomped later in run_test).
    open my $target_fh, "<", $FILE or die "Cannot open file <$FILE>\n";
    $host = <$target_fh>;	# Read in $host
    chomp $host;
    @urls = <$target_fh>;	# Add the list of URLs
    close $target_fh;
}

# If a Host was specified in the command line this overrides any entry
# that might have been in the first line of the file.

if( $HOST ) { $host = $HOST; }

# first we run the dummy test to measure the test overhead, then we run it for real
my $dummy = do_stats(0);
my $real  = do_stats(1);

# adjust elapsed and transaction time to reflect overhead from test
$real->{elapsed}    -= $dummy->{elapsed};
$real->{trans_time} -= $dummy->{trans_time};

print_results($real);
print "** WebServerTest.pl version $VERSION ending at ",scalar localtime,"\n";

exit 0;

# Get the stats
# Run one complete benchmark pass and return a stats hashref.
# $doit: 0 = dummy pass (children go through the motions without
# fetching, used to measure the harness overhead); 1 = real pass.
# Returns the hashref from tally_results() with an added {elapsed}
# wall-clock entry for the whole pass.
sub do_stats {
    my $doit  = shift;
    my $start = time();	# Time::HiRes time() - sub-second resolution

    # open a pipe so that child processes can send results to parent.
    my $pipe = IO::Pipe->new || die "Can't pipe: $!";

    # spawn correct number of children
    for (my $i=0; $i<$COPIES; $i++) {
	die "Can't fork: $!" unless defined (my $pid = fork());
	# if parent, continue spawning children
	next if $pid > 0;  

	# otherwise we're a child, so we run the test once and exit.
	# writer() closes the child's read end; select() makes the pipe
	# the default output handle so run_test()'s print goes to the
	# parent.
	$pipe->writer; select $pipe;
	run_test($doit);
	exit 0;
    }

    # Having spawned the children the Parent reads and tallies the results.
    # Reap exited children asynchronously so none linger as zombies.
    $SIG{CHLD} = sub { do {} while waitpid(-1,WNOHANG) > 0; };

    # open pipe for reading.  reader() closes the parent's write end,
    # so the read loop in tally_results() sees EOF once every child
    # has exited.
    $pipe->reader;
    my $stats = tally_results($pipe);
    $stats->{elapsed} = time() - $start;
    return $stats;
}

# This subroutine is called to tally up the test results
# from all the children.
# Tally up the per-transaction result lines written by the children.
# Each line is: pid <TAB> elapsed <TAB> bytes <TAB> http-status.
# Returns a hashref:
#   count      - number of transactions seen
#   trans_time - summed per-transaction elapsed time (seconds)
#   bytes      - total bytes transferred
#   status     - hashref of http-status => occurrence count
# Counters start at zero so an empty pipe (no child reported anything)
# yields 0s rather than undefs, keeping print_results() well-defined.
sub tally_results {
    my $pipe = shift;
    my %STATUS;
    my ($TIME, $BYTES, $COUNT) = (0, 0, 0);
    while (<$pipe>) {
	chomp;
	my ($child, $time, $bytes, $code) = split("\t");
	$COUNT++;
	$STATUS{$code}++;
	$TIME  += $time;
	$BYTES += $bytes;
    }
    return {
	count      => $COUNT,
	trans_time => $TIME,
	bytes      => $BYTES,
	status     => \%STATUS
    };
}

# Runs a test.  We only actually retrieve URLs if $doit is set
# Runs a test.  We only actually retrieve URLs if $doit is set;
# otherwise we fake an instant 200 so the harness overhead can be
# measured.  Prints one line per transaction (to the currently
# selected handle - the pipe, in a child):
#   pid <TAB> elapsed-seconds <TAB> bytes <TAB> http-status
sub run_test {
    my $doit = shift;
    my $target;

    for (1..$TIMES) {
	# $DELAY is in milliseconds; rand() spreads the pauses so the
	# children do not hit the server in lock-step.
	sleep(rand ($DELAY/1000)) if $DELAY;

	my ($status,$message,$contents);

	# Use $url or select a random URL if it isn't set.
	if( $url ) {
	    $target = $url;
	} else {
	    # Single element wanted: $urls[...], not the one-element
	    # slice @urls[...] the original used.
	    $target = $urls[rand scalar @urls];
	    chomp $target;
	    $target = "http://".$host."/".$target;
	}

	my $start = time();
	($status,$message,$contents) = fetch($target) if     $doit;
	($status,$message,$contents) = (200,'','')    unless $doit;
	my $elapsed = time() - $start;
	my $bytes   = length($contents);

	warn "$$: ",$message,"\n" if $status >= 500;
	print join("\t",$$,$elapsed,$bytes,$status),"\n";
    }
}

# Quick & dirty http client
sub fetch {
    my $url = shift;

    # parse out the URL in a simple fashion
    my ($nethost,$request) = $url =~ m!^http://([^/]+)(.*)!;
    $nethost .= ':80' unless $nethost=~/:\d+$/;
    $request ||= '/';

    # try to make connection with remote host
    my $browser  = LWP::UserAgent->new;
    my $response = $browser->get( $url );
    die "Can't get $url -- ", $response->status_line unless $response->is_success;
    return ($response->code,$response->message,$response->content);
}

# print the results
# Pretty-print the tallied statistics for a run.
# Takes the stats hashref produced by do_stats()/tally_results().
# Guards every division so a run with zero completed transactions (or
# a zero adjusted elapsed time) reports 0.00 instead of dying.
sub print_results {
    my $s = shift;

    my $count = $s->{count}      || 0;
    my $bytes = $s->{bytes}      || 0;
    my $wall  = $s->{elapsed}    || 0;
    my $ttime = $s->{trans_time} || 0;

    my $throughput  = sprintf "%3.2f", $wall  ? $bytes / $wall  : 0;
    my $resp_time   = sprintf "%3.2f", $count ? $ttime / $count : 0;
    my $trans_rate  = sprintf "%3.2f", $wall  ? $count / $wall  : 0;
    my $concurrency = sprintf "%3.1f", $wall  ? $ttime / $wall  : 0;
    my $elapsed     = sprintf "%3.3f", $wall;

    # $DELAY is in milliseconds (see usage and run_test), not seconds.
    print STDOUT <<EOF;
Tests run on Host: $host $TIMES times with $COPIES copies and average delay of $DELAY ms
Transactions:           $count
Elapsed time:           $elapsed sec
Bytes Transferred:      $bytes bytes
Response Time:          $resp_time sec
Transaction Rate:       $trans_rate trans/sec
Throughput:             $throughput bytes/sec
Concurrency:            $concurrency
EOF
    for my $code (sort {$a <=> $b} keys %{$s->{status}}) {
	print "Status Code $code:        $s->{status}->{$code}\n";
    }
}

 


Any comments? email me.    Last update August 2010