Common Crawl Request returns 403 WARC

548 Views Asked by At

I am trying to fetch some WARC records from the Common Crawl archives, but my requests to the server keep failing with a 403. A minimal Python example to replicate the error is provided below. I tried adding a User-Agent to the request headers, but it did not help. Any ideas on how to proceed?

# Standard library
import argparse
import io
import time

# Third-party
import justext                # justext >= 2.2.0
import pandas as pd           # pandas >= 1.0.3
import requests               # requests >= 2.23.0
from tqdm import tqdm
from warcio.archiveiterator import ArchiveIterator  # warcio >= 1.7.3


def debug():
    """Fetch one WARC record from Common Crawl via an HTTP range request.

    Uses the byte offset/length of a single record inside a .warc.gz file
    to download just that record, then iterates it with warcio.

    Returns:
        tuple[str | None, str | None]: (WARC-Target-URI, decoded page body)
        of the last record in the fetched range, or (None, None) if the
        range contained no records.

    Raises:
        requests.HTTPError: if the server does not return a success status
            (e.g. the 403 seen when using the retired S3 endpoint).
    """
    common_crawl_data = {"filename": "crawl-data/CC-MAIN-2016-07/segments/1454702018134.95/warc/CC-MAIN-20160205195338-00121-ip-10-236-182-209.ec2.internal.warc.gz",
                         "offset": 244189209,
                         "length": 989
                         }

    offset, length = int(common_crawl_data['offset']), int(common_crawl_data['length'])
    offset_end = offset + length - 1  # Range header is inclusive at both ends

    # Direct S3 access (https://commoncrawl.s3.amazonaws.com/) now returns
    # 403 Forbidden for anonymous requests; Common Crawl serves public data
    # through its CloudFront endpoint instead.
    prefix = 'https://data.commoncrawl.org/'

    resp = requests.get(prefix + common_crawl_data['filename'],
                        headers={'Range': 'bytes={}-{}'.format(offset, offset_end)})
    # Fail loudly on 403/404 instead of feeding an HTML error page to warcio.
    resp.raise_for_status()
    raw_data = io.BytesIO(resp.content)

    uri = None
    page = None

    # arc2warc=True lets the iterator also transparently handle legacy ARC files.
    for record in ArchiveIterator(raw_data, arc2warc=True):
        uri = record.rec_headers.get_header('WARC-Target-URI')
        R = record.content_stream().read()
        try:
            page = R.strip().decode('utf-8')
        except UnicodeDecodeError:
            # latin1 maps every byte to a code point, so this cannot fail.
            page = R.strip().decode('latin1')
        print(uri, page)
    return uri, page

debug()
0

There are 0 best solutions below