I have written a code which extracts all the words from an html page, according to some heuristics. the output is correct in this my code, but there is some mysterious memory leak. I am really trying to hone some of my perl skills, but this has me banging my head.
the input is a long long file with many html pages, separated by the http header and some additional information. Some of the code may be a little weird, its kind of slapped together, but the output is what i want. without further ado:
use strict;
use warnings;
use HTML::Parse;
use HTML::FormatText;
use File::Slurp;
use Lingua::Stem::Snowball;
$/ = 'warc/0.9 ';
my $sep = 'warc/0.9 ';
open FILE, shift;
open OUT, '>', shift;
open DATA, '>', shift;
my $total = 0;
my $stemmer = Lingua::Stem::Snowball->new( lang => 'en' );
while(<FILE>)
{
my $line = $_;
$line =~ s/$sep//;
if($line)
{
$line =~ /(\d+)\sresponse\s(\S+)\s/i;
my $id = $1;
my $url = $2;
#print "$1, $2\n+++++\n";
$line =~ s/[^<]*//; #remove everything up to the 1st html tag
+ (header, etc)
my $len = length($line);
if($len < 600000)
{
print DATA "$id\t$url\t$total\t$len\n";
$total += $len;
#print OUT "$line";
my $plain_text = HTML::FormatText->new->format(parse_html(
+$line));
$plain_text =~ s/\[image\]//ig;
$plain_text =~ s/(\S)/\L$1/ig;
my @words = $plain_text =~ /\b\S+\b/ig;
#my $stemmer = Lingua::Stem::Snowball->new( lang => 'en' )
+;
$stemmer->stem_in_place( \@words );
foreach my $x (@words)
{
if($x =~ /^[A-Za-z0-9]+$/ and $x !~ /(http:\/\/(\S+)\
+b)|(&?nbsp)|(\b.*\d{5,
}.*\b)|(
^\d+$)|(\S{32,})/)
{
print OUT "$x ";
}
}
print OUT "\n";
print OUT '+-+-+-+-+', "\n";
}
}
}
can any monks give me suggestions here?