Python web-scraping: problem with soup.select

I'm developing a Python script to scrape data from a specific site: https://finance.yahoo.com/quote/AUDUSD%3DX/history?p=AUDUSD%3DX
I'm using BeautifulSoup. The data I'm interested in is the historical prices table on this page.
I'm using the soup.select method this time; the table's class name is W(100%) M(0), and my code is below:
import requests
import pandas as pd
from bs4 import BeautifulSoup
url = "https://finance.yahoo.com/quote/AUDUSD%3DX/history?p=AUDUSD%3DX"
soup = BeautifulSoup(requests.get(url).content, "html.parser")
table = soup.select('table:has(-soup-contains("W(100%) M(0)"))')
print(table)
And this does not generate the result I want.
I have also tried this way:
import requests
import pandas as pd
from bs4 import BeautifulSoup
url = "https://finance.yahoo.com/quote/AUDUSD%3DX/history?p=AUDUSD%3DX"
soup = BeautifulSoup(requests.get(url).content, "html.parser")
table = soup.select("W(100%) M(0)")
print(table)
And there is error as shown below
Traceback (most recent call last):
File "/Users/ryanngan/PycharmProjects/Webscraping/seek.py", line 8, in <module>
table = soup.select("W(100%) M(0)")
File "/Users/ryanngan/PycharmProjects/Webscraping/venv/lib/python3.9/site-packages/bs4/element.py", line 1973, in select
results = soupsieve.select(selector, self, namespaces, limit, **kwargs)
File "/Users/ryanngan/PycharmProjects/Webscraping/venv/lib/python3.9/site-packages/soupsieve/__init__.py", line 144, in select
return compile(select, namespaces, flags, **kwargs).select(tag, limit)
File "/Users/ryanngan/PycharmProjects/Webscraping/venv/lib/python3.9/site-packages/soupsieve/__init__.py", line 67, in compile
return cp._cached_css_compile(pattern, ns, cs, flags)
File "/Users/ryanngan/PycharmProjects/Webscraping/venv/lib/python3.9/site-packages/soupsieve/css_parser.py", line 218, in _cached_css_compile
CSSParser(
File "/Users/ryanngan/PycharmProjects/Webscraping/venv/lib/python3.9/site-packages/soupsieve/css_parser.py", line 1159, in process_selectors
return self.parse_selectors(self.selector_iter(self.pattern), index, flags)
File "/Users/ryanngan/PycharmProjects/Webscraping/venv/lib/python3.9/site-packages/soupsieve/css_parser.py", line 985, in parse_selectors
key, m = next(iselector)
File "/Users/ryanngan/PycharmProjects/Webscraping/venv/lib/python3.9/site-packages/soupsieve/css_parser.py", line 1152, in selector_iter
raise SelectorSyntaxError(msg, self.pattern, index)
soupsieve.util.SelectorSyntaxError: Invalid character '(' position 1
line 1:
W(100%) M(0)
How can I scrape the above data using the soup.select method? Thank you very much.

Using direct class selectors (e.g. .W(100%)) breaks because parentheses are invalid characters in a CSS class selector.
However, you can get around this with the attribute "contains" syntax, [attribute*="value"], which matches on a partial attribute value:
import requests
from bs4 import BeautifulSoup
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
}
response = requests.get(
"https://finance.yahoo.com/quote/AUDUSD%3DX/history?p=AUDUSD%3DX",
headers=headers
)
soup = BeautifulSoup(response.text, "html.parser")
# select any element whose class contains both "W(100%)" and "M(0)":
table = soup.select('[class*="W(100%)"][class*="M(0)"]')
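If the goal is the table's contents rather than the tag itself, pandas (already imported in the question's snippet) can parse it in one step. A minimal sketch, assuming the history table is present in the static HTML that requests receives; if Yahoo renders it with JavaScript instead, read_html raises ValueError ("No tables found"):
import pandas as pd
# read_html parses every <table> on the page into a DataFrame;
# taking the first one is an assumption -- inspect the list if needed
tables = pd.read_html(response.text)
print(tables[0].head())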

Related

Scraping an href

I was wondering if someone could help me scrape an href tag and clean it up. I am trying to scrape the url from the big "Visit Website" button on this page: https://www.goodfirms.co/software/inflow-inventory, and then clean it up a little bit.
Code:
import requests
import time
from bs4 import BeautifulSoup as bs

url = 'https://www.goodfirms.co/software/inflow-inventory'
page = requests.get(url)
time.sleep(2)
soup = bs(page.content, 'lxml')
try:
    url = soup.find("div", class_="entity-detail-header-visit-website")
except AttributeError:
    url = "Couldn't Find"
print(url)
Output Print:
<div class="entity-detail-header-visit-website">
<a class="visit-website-btn" href="https://www.inflowinventory.com/?utm_source=goodfirms&utm_medium=profile" rel="nofollow" target="_blank">Visit website</a>
</div>
Desired Output:
https://www.inflowinventory.com
This will get you what you need:
import requests
from bs4 import BeautifulSoup
headers= {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36'}
r = requests.get('https://www.goodfirms.co/software/inflow-inventory', headers=headers)
soup = BeautifulSoup(r.text, 'html.parser')
link = soup.select_one('a.visit-website-btn')
print(link['href'].split('/?utm')[0])
Result:
https://www.inflowinventory.com
Documentation for BeautifulSoup can be found at:
https://www.crummy.com/software/BeautifulSoup/bs4/doc/
Try this code to get the href value:
url = soup.find("a", class_="visit-website-btn").get('href')
Having the complete URL, you can get the base with:
from urllib.parse import urlsplit
print(urlsplit(url).netloc)
# www.inflowinventory.com
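If you also want to keep the scheme so the result matches the desired output exactly, a small sketch building on the same urlsplit result:
from urllib.parse import urlsplit

parts = urlsplit(url)
# reassemble scheme + host, dropping the path and the utm query entirely
print(f"{parts.scheme}://{parts.netloc}")
# https://www.inflowinventory.com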
"div", class_="entity-detail-header-visit-website" detects the same url two times with html content. So .a.get('href') with find() method will pull the righ url
import requests
from bs4 import BeautifulSoup
url = 'https://www.goodfirms.co/software/inflow-inventory'
page = requests.get(url)
soup = BeautifulSoup(page.content, 'lxml')
link = soup.find("div", class_="entity-detail-header-visit-website").a.get('href')
print(link)
Output:
https://www.inflowinventory.com/?utm_source=goodfirms&utm_medium=profile
If you are looking for a solution that sticks to your original code, it looks like this:
import requests
from bs4 import BeautifulSoup

url = 'https://www.goodfirms.co/software/inflow-inventory'
page = requests.get(url)
soup = BeautifulSoup(page.content, 'lxml')
try:
    url = soup.find("div", class_="entity-detail-header-visit-website")
    print(url.a.get('href'))
except AttributeError:
    url = "Couldn't Find"
    print(url)
Result :
https://www.inflowinventory.com/?utm_source=goodfirms&utm_medium=profile

Accessing websites in a dropdown list

I'm trying to build a web scraper that visits school district websites and retrieves the names and websites of the schools. I'm using https://www.dallasisd.org/ to test the code below.
I'm currently stuck on how to 1) only access the dropdown list of 'Schools' and 2) retrieve the links in the <li> tags in the same dropdown.
Any help would be much appreciated! Thank you.
from bs4 import BeautifulSoup
from selenium import webdriver
import urllib.request
import requests
import re
import xlwt
import pandas as pd
import xlrd
from xlutils.copy import copy
import os.path
hdr = { 'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)' }
browser = webdriver.Chrome()
url = 'https://www.dallasisd.org/'
browser.get(url)
html_source = browser.page_source
browser.quit()
soup = BeautifulSoup(html_source, "lxml")
for name_list in soup.find_all(class_='sw-dropdown-list'):
    print(name_list.text)
The dropdown list of elementary schools is contained in the <div id="cs-elementary-schools-panel" [...]>, which you can access first and then find all the links inside it:
from bs4 import BeautifulSoup
import requests
headers = {
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
url = 'https://www.dallasisd.org/'
req = requests.get(url, headers=headers)
soup = BeautifulSoup(req.content, 'html.parser')
dropdown = soup.find('div', attrs={'id': "cs-elementary-schools-panel"})
for link in dropdown.find_all('li', attrs={'class': "cs-panel-item"}):
    print("Url: https://www.dallasisd.org" + link.find('a')['href'])
You can easily extend this code to the Middle and High schools, as sketched below.
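The middle- and high-school panel ids in this sketch are assumptions modeled on the elementary one, so verify them in the page source:
# panel ids other than the first are assumed -- check the page source
panel_ids = ["cs-elementary-schools-panel", "cs-middle-schools-panel", "cs-high-schools-panel"]
for panel_id in panel_ids:
    dropdown = soup.find('div', attrs={'id': panel_id})
    if dropdown is None:  # skip any id that doesn't exist on the page
        continue
    for link in dropdown.find_all('li', attrs={'class': "cs-panel-item"}):
        print("Url: https://www.dallasisd.org" + link.find('a')['href'])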

Scrape issue in Beautiful Soup: 'NoneType' object has no attribute 'find_all'

Trying to execute this code for scraping the specific websites / RSS feeds mentioned below, I keep getting:
Traceback (most recent call last):
File "C:\Users\Jeanne\Desktop\PYPDIT\pyscape.py", line 28, in <module>
transcripts = [url_to_transcript(u) for u in urls]
File "C:\Users\Jeanne\Desktop\PYPDIT\pyscape.py", line 28, in <listcomp>
transcripts = [url_to_transcript(u) for u in urls]
File "C:\Users\Jeanne\Desktop\PYPDIT\pyscape.py", line 17, in url_to_transcript
text = [p.text for p in soup.find(class_="itemcontent").find_all('p')]
AttributeError: 'NoneType' object has no attribute 'find_all'
Please advise.
import requests
from bs4 import BeautifulSoup
import pickle

def url_to_transcript(url):
    page = requests.get(url).text
    soup = BeautifulSoup(page, "lxml")
    text = [p.text for p in soup.find(class_="itemcontent").find_all('p')]
    print(url)
    return text

# URLs of transcripts in scope
urls = ['http://feeds.nos.nl/nosnieuwstech',
        'http://feeds.nos.nl/nosnieuwsalgemeen']

transcripts = [url_to_transcript(u) for u in urls]
The HTML returned is not the same as what you see on the page, so the class "itemcontent" is never found. You can use the following:
import requests
from bs4 import BeautifulSoup
# import pickle

urls = ['http://feeds.nos.nl/nosnieuwstech', 'http://feeds.nos.nl/nosnieuwsalgemeen']

with requests.Session() as s:
    for url in urls:
        page = s.get(url).text
        soup = BeautifulSoup(page, "lxml")
        print(url)
        print([[i.text for i in desc.select('p')] for desc in soup.select('description')[1:]])
        print('--'*100)
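Folded back into the question's url_to_transcript function, the fix might look like this sketch, which reuses the selectors above and assumes the feeds keep their structure:
def url_to_transcript(url):
    page = requests.get(url).text
    soup = BeautifulSoup(page, "lxml")
    print(url)
    # skip the first <description>, which describes the feed itself
    return [[p.text for p in desc.select('p')] for desc in soup.select('description')[1:]]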

web scraping using BeautifulSoup: reading tables

I'm trying to get data from a table on transfermarkt.com. I was able to get the first 25 entries with the following code. However, I need to get the rest of the entries, which are on the following pages. When I click on the second page, the url does not change.
I tried to increase the range in the for loop, but it gives an error. Any suggestion would be appreciated.
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup

url = 'https://www.transfermarkt.com/spieler-statistik/wertvollstespieler/marktwertetop'
heads = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'}
r = requests.get(url, headers=heads)
source = r.text
soup = BeautifulSoup(source, "html.parser")
players = soup.find_all("a", {"class": "spielprofil_tooltip"})
values = soup.find_all("td", {"class": "rechts hauptlink"})
playerslist = []
valueslist = []
for i in range(0, 25):
    playerslist.append(players[i].text)
    valueslist.append(values[i].text)
df = pd.DataFrame({"Players": playerslist, "Values": valueslist})
Alter the url in the loop (the page number goes in the ?page= query parameter of the ajax endpoint) and also change your selectors:
import requests
from bs4 import BeautifulSoup as bs
import pandas as pd

players = []
values = []
headers = {'User-Agent': 'Mozilla/5.0'}

with requests.Session() as s:
    for page in range(1, 21):
        r = s.get(f'https://www.transfermarkt.com/spieler-statistik/wertvollstespieler/marktwertetop?ajax=yw1&page={page}', headers=headers)
        soup = bs(r.content, 'lxml')
        players += [i.text for i in soup.select('.items .spielprofil_tooltip')]
        values += [i.text for i in soup.select('.items .rechts.hauptlink')]

df = pd.DataFrame({"Players": players, "Values": values})
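As a quick sanity check, 20 pages at 25 players each should give a 500-row frame:
print(df.shape)  # expect (500, 2) if every page returned 25 players
print(df.head())
df.to_csv('marktwerte.csv', index=False)  # filename is arbitrary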

I have a Scrapy script but cannot scrape data, don't know why

I run the script, but I get None, even though the data is on the page.
# -*- coding: utf-8 -*-
import scrapy
from scrapy.selector import Selector

class GetSpider(scrapy.Spider):
    name = 'gets'
    start_urls = ['https://www.retailmenot.com/coupons/insurance?u=ZTF65B5PJZEU3JDF326WY2SXOQ']

    def parse(self, response):
        s = Selector(response)
        code = s.xpath("//button[contains(@class,'CopyCode')][1]/text()").get()
        yield {'code': code}
I expect 52YR, but I get None.
The easiest way to go about this is probably to load the JSON embedded in the page's script tag as a Python dictionary and navigate through it to get to the codes.
The below code should get you started:
import scrapy
import json
import logging

class GetSpider(scrapy.Spider):
    name = 'gets'
    start_urls = ['https://www.retailmenot.com/coupons/insurance?u=ZTF65B5PJZEU3JDF326WY2SXOQ']
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
    }
    custom_settings = {'ROBOTSTXT_OBEY': False}

    def start_requests(self):
        for url in self.start_urls:
            yield scrapy.Request(url,
                                 callback=self.parse,
                                 headers=self.headers,
                                 dont_filter=True)

    def parse(self, response):
        script = response.xpath(
            '//script[contains(text(), "__NEXT_DATA__")]/text()'
        ).extract_first()
        dict_start_index = script.index('{')
        dict_end_index = script.index('};') + 1
        data = json.loads(script[dict_start_index:dict_end_index])
        coupon_data = data['props']['pageProps']['serverState']['apollo']['data']
        for key, value in coupon_data.items():
            try:
                code = value['code']
            except KeyError:
                logging.debug("no code found")
            else:
                yield {'code': code}
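To try the spider without a full Scrapy project, it can be run from a plain script via CrawlerProcess (part of Scrapy's public API). A sketch; the FEEDS setting requires Scrapy >= 2.1, and the output filename is arbitrary:
from scrapy.crawler import CrawlerProcess

# export every yielded item to codes.json
process = CrawlerProcess(settings={'FEEDS': {'codes.json': {'format': 'json'}}})
process.crawl(GetSpider)
process.start()  # blocks until the crawl finishes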
