import requests
from bs4 import BeautifulSoup
import pandas as pd
baseurl = 'https://books.toscrape.com/'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36'
}

# Fetch the listing page and collect a link to every product detail page.
# Fix: the original sent `headers` on the detail requests but not here;
# send them consistently on every request.
r = requests.get(baseurl, headers=headers)
soup = BeautifulSoup(r.content, 'html.parser')

productlinks = []
for article in soup.find_all('article', class_='product_pod'):
    # Each product_pod contains two <a> tags (thumbnail + title) that point
    # at the same page; [1:] keeps only one to avoid duplicate URLs.
    for link in article.find_all('a', href=True)[1:]:
        productlinks.append(baseurl + link['href'])

titles = []
prices = []
upcs = []
product_types = []
for link in productlinks:
    r = requests.get(link, headers=headers)
    soup = BeautifulSoup(r.content, 'html.parser')

    # Fix: on a product detail page the title lives in <h1> (inside
    # div.product_main), not <h3> — soup.find('h3') found nothing, so the
    # bare except silently turned every title into ' '. Catch only the
    # specific "tag missing" failure instead of a bare except.
    try:
        title = soup.find('h1').text
    except AttributeError:
        title = ' '
    titles.append(title)

    price = soup.find('p', class_="price_color").text.replace('£', '').replace(',', '').strip()
    prices.append(price)

    # The "Product Information" table is a series of <th>/<td> rows
    # (UPC, Product Type, prices, availability, ...). Map header -> value
    # so any field can be looked up by name.
    info = {
        row.find('th').text: row.find('td').text
        for row in soup.find_all('tr')
        if row.find('th') is not None and row.find('td') is not None
    }
    upcs.append(info.get('UPC', ' '))
    product_types.append(info.get('Product Type', ' '))

# Fix: the original passed the scalar `price` (the LAST product's price,
# broadcast to every row) instead of the collected list, and stored the
# prices in a list confusingly named `Brand`.
df = pd.DataFrame(
    {
        "Title": titles,
        "Price": prices,
        "UPC": upcs,
        "Product Type": product_types,
    }
)
print(df)
The above script works as expected, but I also want to scrape information about each product, such as the UPC and product type.
For example, to get the information from this single page:
https://books.toscrape.com/catalogue/a-light-in-the-attic_1000/index.html
I want to scrape the UPC, product type, etc. — all of this information is in the "Product Information" table on each product page.
 
                        
You can use the `start=` parameter in the URL to get the next pages.