I want links and all the content from each link - web-scraping

I searched for a keyword (cybersecurity) on a newspaper website and the results show around 10 articles. I want my code to grab each link, follow it, fetch the whole article, and repeat this for all 10 articles on the page. (I don't want the summary, I want the whole article.)
import urllib.request
import ssl
import time
from bs4 import BeautifulSoup

ssl._create_default_https_context = ssl._create_unverified_context

pages = [1]
for page in pages:
    data = urllib.request.urlopen("https://www.japantimes.co.jp/tag/cybersecurity/page/{}".format(page))
    soup = BeautifulSoup(data, 'html.parser')
    for article in soup.find_all('div', class_="content_col"):
        link = article.p.find('a')
        print(link.attrs['href'])
        for link in links:
            headline = link.h1.find('div', class_="padding_block")
            headline = headline.text
            print(headline)
            content = link.p.find_all('div', class_="entry")
            content = content.text
            print(content)
        print()
    time.sleep(3)
This is not working. Also, this line:

    date = link.li.find('time', class_="post_time")

throws the error:

    AttributeError: 'NoneType' object has no attribute 'find'
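That AttributeError means one of the chained lookups (link.li, or an earlier .find()) returned None, and the code then called .find() on that None. A minimal defensive sketch, reusing the tag and class names from the question, is to check each lookup before using it:

    import urllib.request
    import ssl
    from bs4 import BeautifulSoup

    ssl._create_default_https_context = ssl._create_unverified_context

    data = urllib.request.urlopen("https://www.japantimes.co.jp/tag/cybersecurity/page/1")
    soup = BeautifulSoup(data, 'html.parser')

    for article in soup.find_all('div', class_="content_col"):
        # .find() returns None when nothing matches, so guard the result
        # before reading .text or chaining another .find() on it
        time_tag = article.find('time', class_="post_time")
        if time_tag is not None:
            print(time_tag.text)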
This code works and grabs all the article links. I want to add code that fetches the headline and content from every article link.
import urllib.request
import ssl
import time
from bs4 import BeautifulSoup

ssl._create_default_https_context = ssl._create_unverified_context

pages = [1]
for page in pages:
    data = urllib.request.urlopen("https://www.japantimes.co.jp/tag/cybersecurity/page/{}".format(page))
    soup = BeautifulSoup(data, 'html.parser')
    for article in soup.find_all('div', class_="content_col"):
        link = article.p.find('a')
        print(link.attrs['href'])
    print()
    time.sleep(3)

Try the following script. It will fetch all the titles along with their content. Set pages to the highest page number you want to traverse.
import requests
from bs4 import BeautifulSoup

url = 'https://www.japantimes.co.jp/tag/cybersecurity/page/{}'

pages = 4
for page in range(1, pages + 1):
    res = requests.get(url.format(page))
    soup = BeautifulSoup(res.text, "lxml")
    for item in soup.select(".content_col header p > a"):
        resp = requests.get(item.get("href"))
        sauce = BeautifulSoup(resp.text, "lxml")
        title = sauce.select_one("header h1").text
        content = [elem.text for elem in sauce.select("#jtarticle p")]
        print(f'{title}\n{content}\n')
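Note that content here ends up as a list of paragraph strings, so it prints with brackets and quotes. If you want the article as one block of text instead, replace the content line with a join over the same selector:

    content = '\n'.join(elem.text for elem in sauce.select("#jtarticle p"))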

Related

For Loop Not Repeating

I'm trying to find the hrefs for all the states where this company has stores; however, it only finds the href for the first state.
Can anyone figure out why the for loop doesn't repeat for the rest of the states? Thank you very much for your help!
import requests
from bs4 import BeautifulSoup
import csv

# website
sitemap = 'website_url'
# content of website
sitemap_content = requests.get(sitemap).content
# parsing website
soup = BeautifulSoup(sitemap_content, 'html.parser')
#print(soup)

list_of_divs = soup.findAll('div', attrs={'class': 'listings-inner'})
#print(list_of_divs)

header = ['Links']
with open('/Users/ABC/Desktop/v1.csv', 'wt') as csvfile:
    writer = csv.writer(csvfile, delimiter="\t")
    writer.writerow(header)
    for state in list_of_divs:
        # get the url's by state
        print(state.find('div', attrs={'class': 'itemlist'}).a.get('href'))
        rows = [state.find('div', attrs={'class': 'itemlist'}).a.get('href')]
        writer.writerow(rows)
list_of_divs actually only contains one element: the only div on the page with class listings-inner. So when you iterate over it and call the find method, you only ever get the first match inside that single div.
You want to use the find_all method on that div:
import requests
from bs4 import BeautifulSoup

sitemap = 'https://stores.dollargeneral.com/'
sitemap_content = requests.get(sitemap).content
soup = BeautifulSoup(sitemap_content, 'html.parser')

listings_div = soup.find('div', attrs={'class': 'listings-inner'})
for state in listings_div.find_all('div', attrs={'class': 'itemlist'}):
    print(state.a.get('href'))
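If you also want to keep the CSV output from the original script, the same find_all loop can write each href as a row (the output filename here is just an example):

    import csv
    import requests
    from bs4 import BeautifulSoup

    sitemap = 'https://stores.dollargeneral.com/'
    soup = BeautifulSoup(requests.get(sitemap).content, 'html.parser')
    listings_div = soup.find('div', attrs={'class': 'listings-inner'})

    with open('v1.csv', 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter='\t')
        writer.writerow(['Links'])  # header row
        for state in listings_div.find_all('div', attrs={'class': 'itemlist'}):
            writer.writerow([state.a.get('href')])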

Beautiful Soup Pagination, find_all not finding text within next_page class. Also need to extract data from URLs

I've been working on this for a week and am determined to get this working!
My ultimate goal is to write a webscraper where you can insert the county name and the scraper will produce a csv file of information from mugshots - Name, Location, Eye Color, Weight, Hair Color and Height (it's a genetics project I am working on).
The site organization is: primary site page --> state page --> county page (120 mugshots with names and URLs) --> URL with the data I am ultimately after, plus "next" links to another set of 120.
I thought the best way to do this would be to write a scraper that grabs the URLs and names from the table of 120 mugshots and then uses pagination to grab all the URLs and names from the rest of the county (in some cases there are tens of thousands). I can get the first 120, but my pagination doesn't work, so I'm ending up with a csv of 120 names and urls.
I closely followed this article, which was very helpful
from bs4 import BeautifulSoup
import requests
import lxml
import pandas as pd

county_name = input('Please, enter a county name: /Arizona/Maricopa-County-AZ \n')
print(f'Searching {county_name}. Wait, please...')

base_url = 'https://www.mugshots.com'
search_url = f'https://mugshots.com/US-Counties/{county_name}/'

data = {'Name': [], 'URL': []}

def export_table_and_print(data):
    table = pd.DataFrame(data, columns=['Name', 'URL'])
    table.index = table.index + 1
    table.to_csv('mugshots.csv', index=False)
    print('Scraping done. Here are the results:')
    print(table)

def get_mugshot_attributes(mugshot):
    name = mugshot.find('div', attrs={'class', 'label'})
    url = mugshot.find('a', attrs={'class', 'image-preview'})
    name = name.text
    url = mugshot.get('href')
    url = base_url + url
    data['Name'].append(name)
    data['URL'].append(url)

def parse_page(next_url):
    page = requests.get(next_url)
    if page.status_code == requests.codes.ok:
        bs = BeautifulSoup(page.text, 'lxml')
        list_all_mugshot = bs.find_all('a', attrs={'class', 'image-preview'})
        for mugshot in list_all_mugshot:
            get_mugshot_attributes(mugshot)
        next_page_text = mugshot.find('a class', attrs={'next page'})
        if next_page_text == 'Next':
            next_page_text = mugshot.get_text()
            next_page_url = mugshot.get('href')
            next_page_url = base_url + next_page_url
            print(next_page_url)
            parse_page(next_page_url)
        else:
            export_table_and_print(data)

parse_page(search_url)
Any ideas on how to get the pagination to work and also how to eventually get the data from the list of URLs I scrape?
I appreciate your help! I've been working in python for a few months now, but the BS4 and Scrapy stuff is so confusing for some reason.
Thank you so much community!
Anna
It seems you want to know the logic for getting the content from URLs collected while traversing the next pages. This is how you can parse all the links from each page, including the next page, and then use those links to get the content from their inner pages.
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin

url = "https://mugshots.com/"
base = "https://mugshots.com"

def get_next_pages(link):
    print("**" * 20, "current page:", link)
    res = requests.get(link)
    soup = BeautifulSoup(res.text, "lxml")
    for item in soup.select("[itemprop='name'] > a[href^='/Current-Events/']"):
        yield from get_main_content(urljoin(base, item.get("href")))
    next_page = soup.select_one(".pagination > a:contains('Next')")
    if next_page:
        next_page = urljoin(url, next_page.get("href"))
        yield from get_next_pages(next_page)

def get_main_content(link):
    res = requests.get(link)
    soup = BeautifulSoup(res.text, "lxml")
    item = soup.select_one("h1#item-title > span[itemprop='name']").text
    yield item

if __name__ == '__main__':
    for elem in get_next_pages(url):
        print(elem)
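One design note: get_next_pages calls itself recursively for every next page, so a county with thousands of pages could eventually hit Python's recursion limit. A sketch of the same traversal as a while loop (same selectors, reusing get_main_content from the script above):

    import requests
    from bs4 import BeautifulSoup
    from urllib.parse import urljoin

    url = "https://mugshots.com/"
    base = "https://mugshots.com"

    def get_next_pages(link):
        # iterate instead of recursing; stop when there is no Next link
        while link:
            res = requests.get(link)
            soup = BeautifulSoup(res.text, "lxml")
            for item in soup.select("[itemprop='name'] > a[href^='/Current-Events/']"):
                yield from get_main_content(urljoin(base, item.get("href")))
            next_page = soup.select_one(".pagination > a:contains('Next')")
            link = urljoin(url, next_page.get("href")) if next_page else None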

'None' returned when web scraping with Beautiful Soup using find()

I am trying to pick the FTSE price from the BBC website using BeautifulSoup & Requests, but I get the output 'None' when I run it.
import sys
import requests
from bs4 import BeautifulSoup

URL = 'https://www.bbc.co.uk/news/topics./c9qdqqkgz27t/ftse-100'
page = requests.get(URL, timeout=5)
# fetch content from URL
soup = BeautifulSoup(page.content, 'html.parser')
# parse html content
price = soup.find(class_='gel-paragon nw-c-md-market-summary_value')
#price = soup.find("div", class_="gel-paragon nw-c-md-market-summary_value")
# find class with name 'gel...'
print(price)
I've tried using different types of the find function but both return the same. I plan to use this logic to gather data from multiple pages ultimately but want to get it right before I try to iterate.
Your URL was wrong (it had a stray dot after topics), and the class name needed a double underscore in __value. I made a few edits and it works!
import requests
from bs4 import BeautifulSoup

URL = 'https://www.bbc.co.uk/news/topics/c9qdqqkgz27t/ftse-100'
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')

price = soup.find('div', attrs={'class': 'gel-paragon nw-c-md-market-summary__value'})
print(price.text)
Output:
7442.28
This works perfectly:
import requests
from bs4 import BeautifulSoup

url = 'https://www.bbc.com/news/topics/c9qdqqkgz27t/ftse-100'
r = requests.get(url)
soup = BeautifulSoup(r.text, 'lxml')

price = soup.select_one('div.gel-paragon')
print(price.text)
Output:
7418.34
Note: Try 'html.parser' if you don't have 'lxml'
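Since the original symptom was a printed None, it can also help to check for a missing element before reading .text, so that a wrong URL or class name fails with a clear message instead of an AttributeError. A small sketch using the working URL and selector from above:

    import requests
    from bs4 import BeautifulSoup

    url = 'https://www.bbc.com/news/topics/c9qdqqkgz27t/ftse-100'
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')

    price = soup.select_one('div.gel-paragon')
    if price is None:
        # the selector matched nothing: the page layout or class name changed
        print('FTSE price element not found')
    else:
        print(price.text)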

Get the last page number of a webpage - Beautiful Soup

I'm trying to get the page number of the last page of this website:
http://digitalmoneytimes.com/category/crypto-news/
This link shows that the last page number is 335, but I can't extract the page number.
soup = BeautifulSoup(page.content, 'html.parser')
soup_output = soup.find_all("li", {"class": "active"})
soup_output = soup.select(tag)
print(soup_output)
I get an empty list as the output
In order to get the last page of the given website, I would strongly recommend the following code:
import requests
from bs4 import BeautifulSoup

page = requests.get("http://digitalmoneytimes.com/category/crypto-news/")
soup = BeautifulSoup(page.content, 'html.parser')
soup = soup.find_all("a", href=True)

pages = []
for x in soup:
    if "http://digitalmoneytimes.com/category/crypto-news/page/" in str(x):
        pages.append(x)
last_page = pages[2].getText()
where last_page is the last page number. Since I don't have access to your tag and page variables, I can't really tell where the problem in your code is.
Really hope this solves your problem.
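Note that pages[2] relies on the pagination links appearing in a fixed order. A sketch that is less position-dependent, assuming each pagination href ends in /page/<number>/, is to take the maximum of the numbers instead:

    import re
    import requests
    from bs4 import BeautifulSoup

    page = requests.get("http://digitalmoneytimes.com/category/crypto-news/")
    soup = BeautifulSoup(page.content, 'html.parser')

    page_nums = []
    for a in soup.find_all("a", href=True):
        # pagination links are assumed to look like .../crypto-news/page/<number>/
        m = re.search(r"/crypto-news/page/(\d+)", a["href"])
        if m:
            page_nums.append(int(m.group(1)))

    print(max(page_nums))  # highest page number found among pagination links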
If it is about getting the last page number, there is something you might try out as well:
import requests
from bs4 import BeautifulSoup

link = 'http://digitalmoneytimes.com/category/crypto-news/'
res = requests.get(link)
soup = BeautifulSoup(res.text, "lxml")

last_page_num = soup.find(class_="pagination-next").find_previous_sibling().text
print(last_page_num)
Output:
336

Scrape the next pages in Python using BeautifulSoup

I want to scrape the links from each page, move on to the next pages, and do the same. Here is my code to scrape links from the first page:
import requests
from bs4 import BeautifulSoup

page = 'https://www.booli.se/slutpriser/goteborg/22/?objectType=L%C3%A4genhet'
request = requests.get(page)
soup = BeautifulSoup(request.text, 'lxml')
links = soup.findAll('a', class_='search-list__item')

url = []
prefix = "https://www.booli.se"
for link in links:
    url.append(prefix + link["href"])
I tried the following for the first three pages, but it didn't work.
import re
import requests
from bs4 import BeautifulSoup

url = []
prefix = "https://www.booli.se"
with requests.Session() as session:
    for page in range(4):
        response = session.get("https://www.booli.se/slutpriser/goteborg/22/?objectType=L%C3%A4genhet&page=%f" % page)
        soup = BeautifulSoup(response.content, "html.parser")
        links = soup.findAll('a', class_='search-list__item')
        for link in links:
            url.append(prefix + link["href"])
First you have to write code that works fine for one page.
Then put your scraping code in a loop:

url = "https://www.booli.se/slutpriser/goteborg/22/?objectType=L%C3%A4genhet&page=1"
while True:
    # code goes here

You will notice there is a page=<number> at the end of the link.
You have to run the loop over these URLs, changing the page number each time:
i = 1
url = "https://www.booli.se/slutpriser/goteborg/22/?objectType=L%C3%A4genhet&page=" + str(i)
while True:
    i = i + 1
    page = requests.get(url)
    if page.status_code != 200:
        break
    url = "https://www.booli.se/slutpriser/goteborg/22/?objectType=L%C3%A4genhet&page=" + str(i)
    # Your scraping code goes here
I have used an if statement so that the loop does not go on forever. It will go up to the last page.
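Putting the pieces together, here is a sketch that combines the status-code check above with the link-collecting code from the question (selectors assumed unchanged):

    import requests
    from bs4 import BeautifulSoup

    prefix = "https://www.booli.se"
    base = "https://www.booli.se/slutpriser/goteborg/22/?objectType=L%C3%A4genhet&page="
    urls = []

    i = 1
    while True:
        page = requests.get(base + str(i))
        if page.status_code != 200:
            break  # stop once the next page no longer exists
        soup = BeautifulSoup(page.text, 'lxml')
        for link in soup.findAll('a', class_='search-list__item'):
            urls.append(prefix + link["href"])
        i += 1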
Yes, I did it. Thank you. Here is the code for the first two pages:
urls = []
for page in range(3):
    urls.append("https://www.booli.se/slutpriser/goteborg/22/?objectType=L%C3%A4genhet&page={}".format(page))
pages = urls[1:]  # drop page=0

import requests
from bs4 import BeautifulSoup

inturl = []
for page in pages:
    request = requests.get(page)
    soup = BeautifulSoup(request.text, 'lxml')
    links = soup.findAll('a', class_='search-list__item')
    prefix = "https://www.booli.se"
    for link in links:
        inturl.append(prefix + link["href"])
