#!/usr/bin/env perl
# Fetch a page with YADA (libcurl + EV event loop), then scrape message
# posts and their links out of the HTML with Web::Scraper::LibXML.
use 5.016;
use common::sense;   # strict + warnings + feature bundle
use utf8::all;

# Use fast binary libraries
use EV;
use Web::Scraper::LibXML;
use YADA 0.039;

use Data::Dumper;                          # FIX: was missing — Data::Dumper->Dump was called without loading it
use Data::Structure::Util qw( unbless );   # NOTE(review): imported but unused in this script — confirm before removing

YADA->new(
    common_opts => {
        # Available opts @ http://curl.haxx.se/libcurl/c/curl_easy_setopt.html
        encoding       => '',   # empty string: accept all encodings curl supports
        followlocation => 1,
        maxredirs      => 5,
    },
    http_response => 1,   # wrap results in HTTP::Response objects
    max           => 4,   # up to 4 parallel transfers
)->append([qw[
    http://foo.qa/release
]] => sub {
    my ($self) = @_;

    # Skip transport errors, HTTP failures, and non-HTML payloads.
    return
        if $self->has_error
        or not $self->response->is_success
        or not $self->response->content_is_html;

    # Declare the scraper once and then reuse it
    state $scraper = scraper {
        process q(ol > .message > .innerContainer > .messageInfo > .messageContent),
            q(op) => {
                q(links[]) => scraper { process q(a), q(shit[]) => q(@href); },
                post       => q(TEXT),
            };
    };

    # Employ amazing Perl (en|de)coding powers to handle HTML charsets.
    # FIX: scrape() returns a single hashref, not a list — store it in a
    # scalar so the debug lines below ($doc->{op}) actually refer to it.
    my $doc = $scraper->scrape(
        $self->response->decoded_content,
        $self->final_url,
    );

    # print "$_\n" for @{$doc->{op} // []};
    print Dumper($doc);
    # print $doc->{op}."\n"
})->wait;