How can I show more than 100 results per page? - web-scraping

I want to change the number of results shown on this page, https://fifatracker.net/players/, to more than 100 and then export the table to Excel to make my work easier. I tried to scrape it with Python following a tutorial, but I can't make it work. If there is a way to extract the table from all of the pages, that would also help me.

As stated, the API is limited to 100 results per request. Simply iterate over the page number in the query payload to fetch every page:
import pandas as pd
import requests

url = 'https://fifatracker.net/api/v1/players/'
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36'}

page = 1
payload = {
    "pagination": {
        "per_page": "100", "page": page},
    "filters": {
        "attackingworkrate": [],
        "defensiveworkrate": [],
        "primarypositions": [],
        "otherpositions": [],
        "nationality": [],
        "order_by": "-overallrating"},
    "context": {
        "username": "guest",
        "slot": "1", "season": 1},
    "currency": "eur"}

# The first request also tells us how many pages there are
jsonData = requests.post(url, headers=headers, json=payload).json()
current_page = jsonData['pagination']['current_page']
last_page = jsonData['pagination']['last_page']

dfs = []
for page in range(1, last_page + 1):
    # Page 1 was already fetched above; only re-request for later pages
    if page != 1:
        payload['pagination']['page'] = page
        jsonData = requests.post(url, headers=headers, json=payload).json()

    players = pd.json_normalize(jsonData['result'])
    dfs.append(players)
    print('Page %s of %s' % (page, last_page))

df = pd.concat(dfs).reset_index(drop=True)
Output:
print(df)
slug ... info.contract.loanedto_clubname
0 lionel-messi ... NaN
1 cristiano-ronaldo ... NaN
2 robert-lewandowski ... NaN
3 neymar-jr ... NaN
4 kevin-de-bruyne ... NaN
... ... ...
19137 levi-kaye ... NaN
19138 phillip-cancar ... NaN
19139 julio-pérez ... NaN
19140 alan-mclaughlin ... NaN
19141 tatsuki-yoshitomi ... NaN
[19142 rows x 92 columns]
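Since the end goal is an Excel file, the combined frame can be written out with pandas once the loop finishes. A minimal sketch (the file names are just examples; to_excel needs the openpyxl or xlsxwriter package installed):

# Write the combined table to an Excel workbook
df.to_excel('fifa_players.xlsx', index=False)

# Or write a plain CSV, which Excel also opens, without the extra dependency
df.to_csv('fifa_players.csv', index=False)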

Related

"Resource not found" from NSE India even when using curl

I am scraping the NSE India results calendar via a curl command, but it keeps giving me a "Resource not found" error. Here's my code:
import os
import time
import json
import pandas as pd

url = "https://www.nseindia.com/api/event-calendar?index=equities"
header1 = "Host:www.nseindia.com"
header2 = "User-Agent:Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:82.0) Gecko/20100101 Firefox/82.0"
header3 = "Accept:text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"
header4 = "Accept-Language:en-US,en;q=0.5"
header5 = "Accept-Encoding:gzip, deflate, br"
header6 = "DNT:1"
header7 = "Connection:keep-alive"
header8 = "Upgrade-Insecure-Requests:1"
header9 = "Pragma:no-cache"
header10 = "Cache-Control:no-cache"

def run_curl_command(curl_command, max_attempts):
    result = os.popen(curl_command).read()
    count = 0
    # Retry while the API keeps answering "Resource not found"
    while "Resource not found" in result and count < max_attempts:
        result = os.popen(curl_command).read()
        count += 1
        time.sleep(1)
    print("API Read")
    result = json.loads(result)
    result = pd.DataFrame(result)

def init():
    max_attempts = 100
    curl_command = f'curl "{url}" -H "{header1}" -H "{header2}" -H "{header3}" -H "{header4}" -H "{header5}" -H "{header6}" -H "{header7}" -H "{header8}" -H "{header9}" -H "{header10}" --compressed '
    print(f"curl_command : {curl_command}")
    run_curl_command(curl_command, max_attempts)

init()
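For reference, the same call can be made without shelling out to curl. Below is a minimal sketch using requests, under the assumption that the endpoint behaves as described in the question: NSE commonly rejects API calls that arrive without the cookies set by the main site, so warming up a Session against the homepage first often avoids the "Resource not found" response.

# Sketch only: fetch the NSE event calendar with requests instead of curl.
# Assumes the API still requires cookies issued by the main page.
import requests
import pandas as pd

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:82.0) Gecko/20100101 Firefox/82.0",
    "Accept-Language": "en-US,en;q=0.5",
}

with requests.Session() as s:
    s.headers.update(headers)
    s.get("https://www.nseindia.com")  # pick up session cookies first
    r = s.get("https://www.nseindia.com/api/event-calendar?index=equities")
    df = pd.DataFrame(r.json())
    print(df)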

How can I webscrape these ticker symbols from barchart.com?

I am trying to use Beautiful Soup to webscrape the list of ticker symbols from this page: https://www.barchart.com/options/most-active/stocks
My code returns a lot of HTML from the page, but I can't find any of the ticker symbols in it with Ctrl+F. I would much appreciate it if someone could let me know how I can access them!
Code:
from bs4 import BeautifulSoup as bs
import requests
headers = {'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36"}
url = "https://www.barchart.com/options/most-active/stocks"
page = requests.get(url, headers=headers)
html = page.text
soup = bs(html, 'html.parser')
print(soup.find_all())

The symbols never appear in the HTML that requests receives because the table is rendered client-side from an XHR call; query that backing API endpoint directly instead:
import requests
from urllib.parse import unquote
import pandas as pd

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:90.0) Gecko/20100101 Firefox/90.0",
}

def main(url):
    with requests.Session() as req:
        req.headers.update(headers)
        # Hit the site root (the first 25 characters of the API url) to obtain the XSRF cookie
        r = req.get(url[:25])
        req.headers.update(
            {'X-XSRF-TOKEN': unquote(r.cookies.get_dict()['XSRF-TOKEN'])})
        params = {
            "list": "options.mostActive.us",
            "fields": "symbol,symbolType,symbolName,hasOptions,lastPrice,priceChange,percentChange,optionsImpliedVolatilityRank1y,optionsTotalVolume,optionsPutVolumePercent,optionsCallVolumePercent,optionsPutCallVolumeRatio,tradeTime,symbolCode",
            "orderBy": "optionsTotalVolume",
            "orderDir": "desc",
            "between(lastPrice,.10,)": "",
            "between(tradeTime,2021-08-03,2021-08-04)": "",
            "meta": "field.shortName,field.type,field.description",
            "hasOptions": "true",
            "page": "1",
            "limit": "500",
            "raw": "1"
        }
        r = req.get(url, params=params).json()
        df = pd.DataFrame(r['data']).iloc[:, :-1]
        print(df)

main('https://www.barchart.com/proxies/core-api/v1/quotes/get?')
Output:
symbol symbolType ... tradeTime symbolCode
0 AMD 1 ... 08/03/21 STK
1 AAPL 1 ... 08/03/21 STK
2 TSLA 1 ... 08/03/21 STK
3 AMC 1 ... 08/03/21 STK
4 PFE 1 ... 08/03/21 STK
.. ... ... ... ... ...
495 BTU 1 ... 08/03/21 STK
496 EVER 1 ... 08/03/21 STK
497 VRTX 1 ... 08/03/21 STK
498 MCHP 1 ... 08/03/21 STK
499 PAA 1 ... 08/03/21 STK
[500 rows x 14 columns]
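If only the ticker symbols are needed, the symbol column can be reduced to a plain list once the DataFrame exists (this assumes main() is changed to return df rather than only print it; the column name is taken from the output above):

df = main('https://www.barchart.com/proxies/core-api/v1/quotes/get?')  # assumes main() now ends with: return df
symbols = df['symbol'].tolist()
print(symbols)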

Dash for R - errors when refreshing the page and uploading a CSV file

This is Dash for R code for uploading a CSV file.
I get the following error when I refresh the page:
error: non-character argument
request: 127.0.0.1 - ID_127.0.0.1 [15/Jul/2020:22:22:38 +0530] "POST /_dash-update-component HTTP/1.1" 500 0 "http://127.0.0.1:8050/" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"
While I am uploading a CSV file, I get the following error:
error: could not find function "base64_dec"
request: 127.0.0.1 - ID_127.0.0.1 [15/Jul/2020:22:23:08 +0530] "POST /_dash-update-component HTTP/1.1" 500 0 "http://127.0.0.1:8050/" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"
library(dash)
library(dashCoreComponents)
library(dashHtmlComponents)
library(dashTable)

app <- Dash$new()

app$layout(htmlDiv(list(
  dccUpload(
    id='upload-data',
    children=htmlDiv(list(
      'Drag and Drop or ',
      htmlA('Select Files')
    )),
    style=list(
      'width'= '100%',
      'height'= '60px',
      'lineHeight'= '60px',
      'borderWidth'= '1px',
      'borderStyle'= 'dashed',
      'borderRadius'= '5px',
      'textAlign'= 'center',
      'margin'= '10px'
    ),
    # Allow multiple files to be uploaded
    multiple=TRUE
  ),
  htmlDiv(id='output-data-upload')
)))

parse_contents = function(contents, filename, date){
  content_type = strsplit(contents, ",")
  content_string = strsplit(contents, ",")
  decoded = base64_dec(content_string)
  if('csv' %in% filename){
    df = read.csv(utf8::as_utf8(decoded))
  } else if('xls' %in% filename){
    df = read.table(decoded, encoding = 'bytes')
  } else{
    return(htmlDiv(list(
      'There was an error processing this file.'
    )))
  }
  return(htmlDiv(list(
    htmlH5(filename),
    htmlH6(anytime(date)),
    dashDataTable(df_to_list('records'), columns = lapply(colnames(df), function(x){list('name' = x, 'id' = x)})),
    htmlHr(),
    htmlDiv('Raw Content'),
    htmlPre(paste(substr(toJSON(contents), 1, 100), "..."), style=list(
      'whiteSpace'= 'pre-wrap',
      'wordBreak'= 'break-all'
    ))
  )))
}

app$callback(
  output = list(id='output-data-upload', property = 'children'),
  params = list(input(id = 'upload-data', property = 'contents'),
                state(id = 'upload-data', property = 'filename'),
                state(id = 'upload-data', property = 'last_modified')),
  function(list_of_contents, list_of_names, list_of_dates){
    if(is.null(list_of_contents) == FALSE){
      children = lapply(1:length(list_of_contents), function(x){
        parse_contents(list_of_contents[[x]], list_of_names[[x]], list_of_dates[[x]])
      })
    }
    return(children)
  })

app$run_server()
I am having the same problem following the dashr example that uses the upload component to read a CSV file. I found a few lines that are not working properly, but I am still unable to get a data frame from the file in a straightforward way.
Regarding the error could not find function "base64_dec": the jsonlite package has a base64_dec function that seems to do what is intended. You can qualify the function with the package name when calling it:
decoded = jsonlite::base64_dec(content_string)
Regarding the error non-character argument: it is generated by the following line when the app loads, because "contents" is still empty:
# This gives an error if it runs before any data has been read
content_type = strsplit(contents, ",")
# In any case it should look like the line below, because the contents come in a list and you want the first element
content_type = strsplit(contents, ",")[[1]][1]
Dash runs the callback once at the beginning of the app, but here we need the function to execute only after a file has been selected. The condition in the if statement is not doing that job:
# Executes the code in the if block even before any data is selected:
if(is.null(list_of_contents) == FALSE)
# Executes only when data has been selected:
if(length(list_of_contents[[1]])>0)
The main issue is that once you have the decoded binary file, read.csv can't read it directly (at least not as written in the example code, because read.csv expects a file name as its input). Something that partially worked for me is this readBin() example, but you need to know the size of the table in advance, which is not practical in my case because it will be different every time.
This is the complete code, modified to fix the issues described above, but the core part of reading the CSV is still not functional. I also changed the conditions that check whether the selected file is a CSV or Excel file (they were not working properly):
library(dashCoreComponents)
library(dashHtmlComponents)
library(dash)
library(anytime)

app <- Dash$new()

app$layout(htmlDiv(list(
  dccUpload(
    id='upload-data',
    children=htmlDiv(list(
      'Drag and Drop or ',
      htmlA('Select Files')
    )),
    style=list(
      'width'= '100%',
      'height'= '60px',
      'lineHeight'= '60px',
      'borderWidth'= '1px',
      'borderStyle'= 'dashed',
      'borderRadius'= '5px',
      'textAlign'= 'center',
      'margin'= '10px'
    ),
    # Allow multiple files to be uploaded
    multiple=TRUE
  ),
  htmlDiv(id='output-data-upload')
)))

parse_contents = function(contents, filename, date){
  print("Inside function parse")
  content_type = strsplit(contents, ",")[[1]][1]
  content_string = strsplit(contents, ",")[[1]][2]
  #print(content_string)
  decoded = jsonlite::base64_dec(content_string)
  #print(decoded)
  if(grepl(".csv", filename, fixed=TRUE)){
    print("csv file selected")
    ## A function to read a csv file from the binary data is needed here,
    ## because read.csv asks for the file NAME.
    ## readBin() can read it, but you need to know the size of the table to parse it properly.
    #as.data.frame(readBin(decoded, character()))
    #df = read.csv(utf8::as_utf8(decoded))
  } else if(grepl(".xlsx", filename, fixed=TRUE)){
    ## A way to read the Excel file is also needed
    df = read.table(decoded, encoding = 'bytes')
  } else{
    return(htmlDiv(list(
      'There was an error processing this file.'
    )))
  }
  return(htmlDiv(list(
    htmlH5(filename),
    htmlH6(anytime(date)),
    dashDataTable(df_to_list('records'), columns = lapply(colnames(df), function(x){list('name' = x, 'id' = x)})),
    htmlHr(),
    htmlDiv('Raw Content'),
    htmlPre(paste(substr(toJSON(contents), 1, 100), "..."), style=list(
      'whiteSpace'= 'pre-wrap',
      'wordBreak'= 'break-all'
    ))
  )))
}

app$callback(
  output = list(id='output-data-upload', property = 'children'),
  params = list(input(id = 'upload-data', property = 'contents'),
                state(id = 'upload-data', property = 'filename'),
                state(id = 'upload-data', property = 'last_modified')),
  function(list_of_contents, list_of_names, list_of_dates){
    if(length(list_of_contents[[1]])>0){
      print("Inside if")
      children = lapply(1:length(list_of_contents), function(x){
        parse_contents(list_of_contents[[x]], list_of_names[[x]], list_of_dates[[x]])
      })
      return(children)
    }
  })

app$run_server()
I hope they revise this example to make it work.

R - RMySQL - how to save multiple SQL queries to one file?

I do some data analysis in R. At the end of the script I want to save my results to a file. I know there are several ways to do it, but none of them works properly for me. When I try sink() it works, but it gives me:
<MySQLResult:1,5,1>
host logname user time request_fline status
1 142.4.5.115 - - 2018-01-03 12:08:58 GET /phpmyadmin?</script><script>alert('<!--VAIBS-->');</script><script> HTTP/1.1 400
size_varchar referer agent ip_adress size_int cookie time_microsec filename
1 Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0 445 - 142.4.5.115 445 - 159 -
request_protocol keepalive request_method contents_of_foobar contents_of_notefoobar port child_id
<MySQLResult:1,5,1>
[1] host logname user time request_fline status
[7] size_varchar referer agent ip_adress size_int cookie
<0 rows> (or 0-length row.names)
which is totally unusable because I can't export that kind of data. If I try write.table, it produces a file with one readable row, but after that one row the R script stops with the error Error in isOpen(file, "w") : invalid connection; write.csv gives the same result, and lapply just gives me an empty file.
Here is my code:
fileConn <- file("outputX.txt")
fileCon2 <- file("outputX.csv")
sink("outputQuery.txt")
for (i in 1:length(awq)){
  sql <- paste("SELECT * FROM mtable ORDER BY cookie LIMIT ", awq[i], ",1")
  nb <- dbGetQuery(mydb, sql)
  print(nb)
  write.table(nb, file = fileConn, append = TRUE, quote = FALSE, sep = " ", eol = "\n", na = "NA", row.names = FALSE, col.names = FALSE)
  write.csv(nb, file = fileCon2, row.names = FALSE, sep = " ")
  lapply(nb, write, fileConn, append = TRUE, ncolumns = 7)
  writeLines(unlist(lapply(nb, paste, collapse = " ")))
}
sink()
close(fileConn)
close(fileCon2)
I am new to R, so I don't know what else I should try. What I want is one file where the data are printed in a form that is easy to read and export. For example like this:
142.4.5.115 - - 2018-01-03 12:08:58 GET /phpmyadmin?/><!--VAIBS--> HTTP/1.1 400 Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0 445 - 142.4.5.115 445 - 145 - HTTP/1.1 0 GET - - 80 7216 ?/><!--VAIBS--> GET /phpmyadmin?/><!--VAIBS--> HTTP/1.1 - 0 /phpmyadmin - 354 0
142.4.5.115 - - 2018-01-03 12:10:23 GET /phpmyadmin?/><!--VAIBS--> HTTP/1.1 400 Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0 445 - 142.4.5.115 445 - 145 - HTTP/1.1 0 GET - - 80 7216 ?/><!--VAIBS--> GET /phpmyadmin?/><!--VAIBS--> HTTP/1.1 - 0 /phpmyadmin - 354 0
142.4.5.115 - - 2018-01-03 12:12:41 GET /phpmyadmin?/><!--VAIBS--> HTTP/1.1 400 Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0 445 - 142.4.5.115 445 - 145 - HTTP/1.1 0 GET - - 80 7216 ?/><!--VAIBS--> GET /phpmyadmin?/><!--VAIBS--> HTTP/1.1 - 0 /phpmyadmin - 354 0
142.4.5.115 - - 2018-01-03 12:15:29 GET /phpmyadmin?/><!--VAIBS--> HTTP/1.1 400 Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0 445 - 142.4.5.115 445 - 145 - HTTP/1.1 0 GET - - 80 7216 ?/><!--VAIBS--> GET /phpmyadmin?/><!--VAIBS--> HTTP/1.1 - 0 /phpmyadmin - 354 0
or this:
host,logname,user,time, request_fline status,size_varchar,referer agent,ip_adress,size_int,cookie,time_microsec,filename,request_protocol,keepalive,request_method,contents_of_foobar,contents_of_notefoobar port child_id
1 142.4.5.115 - - 2018-01-03 12:08:58 GET /phpmyadmin?/><!--VAIBS--> HTTP/1.1 400 Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0 445 - 142.4.5.115 445 - 145 - HTTP/1.1 0 GET - - 80 7216 ?/><!--VAIBS--> GET /phpmyadmin?/><!--VAIBS--> HTTP/1.1 - 0 /phpmyadmin - 354 0
2 142.4.5.115 - - 2018-01-03 12:10:23 GET /phpmyadmin?/><!--VAIBS--> HTTP/1.1 400 Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0 445 - 142.4.5.115 445 - 145 - HTTP/1.1 0 GET - - 80 7216 ?/><!--VAIBS--> GET /phpmyadmin?/><!--VAIBS--> HTTP/1.1 - 0 /phpmyadmin - 354 0
3 142.4.5.115 - - 2018-01-03 12:12:41 GET /phpmyadmin?/><!--VAIBS--> HTTP/1.1 400 Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0 445 - 142.4.5.115 445 - 145 - HTTP/1.1 0 GET - - 80 7216 ?/><!--VAIBS--> GET /phpmyadmin?/><!--VAIBS--> HTTP/1.1 - 0 /phpmyadmin - 354 0
4 142.4.5.115 - - 2018-01-03 12:15:29 GET /phpmyadmin?/><!--VAIBS--> HTTP/1.1 400 Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0 445 - 142.4.5.115 445 - 145 - HTTP/1.1 0 GET - - 80 7216 ?/><!--VAIBS--> GET /phpmyadmin?/><!--VAIBS--> HTTP/1.1 - 0 /phpmyadmin - 354 0
or something similar. Best of all would be some help with calling write.table in a loop without the error, but I will welcome any working solution. The best I have so far is:
sql <- paste("SELECT * FROM idsaccess ORDER BY cookie LIMIT ", awq[1], ",1")
nb <- dbGetQuery(mydb, sql)
write.table(nb, file = fileConn, append = TRUE, quote = FALSE, sep = " ", eol = "\n", na = "NA", row.names = FALSE, col.names = FALSE)
fileConn<-file("outputX1.txt")
sql <- paste("SELECT * FROM idsaccess ORDER BY cookie LIMIT ", awq[2], ",1")
nb <- dbGetQuery(mydb, sql)
write.table(nb, file = fileConn, append = true, quote = FALSE, sep = " ", eol = "\n", na = "NA", row.names = FALSE, col.names = FALSE)
But this writes every query to its own file, and I don't want each query in a separate file. Any help?

Simply concatenate all the query data frames into one large data frame, since they all share the same structure, and then write it to file in a single call, which is really the typical way to use write.table or its wrapper write.csv.
Specifically, turn the for loop:
for (i in 1:length(awq)){
  sql <- paste("SELECT * FROM mtable ORDER BY cookie LIMIT ", awq[i], ",1")
  nb <- dbGetQuery(mydb, sql)
}
into an lapply that returns a list of data frames:
df_list <- lapply(1:length(awq), function(i) {
  sql <- paste0("SELECT * FROM mtable ORDER BY cookie LIMIT ", awq[i], ",1")
  nb <- dbGetQuery(mydb, sql)
})
Then row-bind with do.call to stack all the data frames into a single data frame and write it out:
final_df <- do.call(rbind, df_list)
write.table(final_df, file = "outputX.txt", append = FALSE, quote = FALSE, sep = " ",
            eol = "\n", na = "NA", row.names = FALSE, col.names = FALSE)

How to get table from html form using rvest or httr?

I am using R, version 3.3.1. I am trying to scrape data from the following web site:
http://plovila.pomorstvo.hr/
As you can see, it is an HTML form. I would like to choose "Tip objekta" (object type), for example "Jahta" (yacht), and enter a "NIB" (an integer, e.g. 93567). You can try it yourself: just choose "Jahta" and type 93567 into the NIB field.
The method is POST, content type application/x-www-form-urlencoded. I have tried three different approaches: rvest, POST (httr package) and postForm (RCurl). My rvest code is:
session <- html_session("http://plovila.pomorstvo.hr")
form <- html_form(session)[[1]]
form <- set_values(form, `ctl00$Content_FormContent$uiTipObjektaDropDown` = 2,
`ctl00$Content_FormContent$uiOznakaTextBox` = "",
`ctl00$Content_FormContent$uiNibTextBox` = 93567)
x <- submit_form(session, form)
If I run this code I get a 200 status, but I don't understand how I can get the table from the response.
An additional step would be to press the "Detalji" (details) button to get further information, but I can't see any of that information in x, the output of submit_form.
I used the curlconverter package to take the "Copy as cURL" data from the XHR POST request and turn it automagically into:
httr::VERB(verb = "POST", url = "http://plovila.pomorstvo.hr/",
httr::add_headers(Origin = "http://plovila.pomorstvo.hr",
`Accept-Encoding` = "gzip, deflate",
`Accept-Language` = "en-US,en;q=0.8",
`X-Requested-With` = "XMLHttpRequest",
Connection = "keep-alive",
`X-MicrosoftAjax` = "Delta=true",
Pragma = "no-cache", `User-Agent` = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.34 Safari/537.36",
Accept = "*/*", `Cache-Control` = "no-cache",
Referer = "http://plovila.pomorstvo.hr/",
DNT = "1"), httr::set_cookies(ASP.NET_SessionId = "b4b123vyqxnt4ygzcykwwvwr"),
body = list(`ctl00$uiScriptManager` = "ctl00$Content_FormContent$ctl00|ctl00$Content_FormContent$uiPretraziButton",
ctl00_uiStyleSheetManager_TSSM = ";|635908784800000000:d29ba49:3cef4978:9768dbb9",
`ctl00$Content_FormContent$uiTipObjektaDropDown` = "2",
`ctl00$Content_FormContent$uiImeTextBox` = "",
`ctl00$Content_FormContent$uiNibTextBox` = "93567",
`__EVENTTARGET` = "", `__EVENTARGUMENT` = "",
`__LASTFOCUS` = "", `__VIEWSTATE` = "/wEPDwUKMTY2OTIzNTI1MA9kFgJmD2QWAgIDD2QWAgIBD2QWAgICD2QWAgIDD2QWAmYPZBYIAgEPZBYCZg9kFgZmD2QWAgIBDxAPFgYeDURhdGFUZXh0RmllbGQFD05heml2VGlwT2JqZWt0YR4ORGF0YVZhbHVlRmllbGQFDElkVGlwT2JqZWt0YR4LXyFEYXRhQm91bmRnZBAVBAAHQnJvZGljYQVKYWh0YQbEjGFtYWMVBAEwATEBMgEzFCsDBGdnZ2cWAQICZAIBDw8WAh4HVmlzaWJsZWdkFgICAQ8PFgIfA2dkZAICDw8WAh8DaGQWAgIBDw8WBB4EVGV4dGUfA2hkZAIHDzwrAA4CABQrAAJkFwEFCFBhZ2VTaXplAgoBFgIWCw8CCBQrAAhkZGRkZDwrAAUBBAUHSWRVcGlzYTwrAAUBBAUISWRVbG9za2E8KwAFAQQFBlNlbGVjdGRlFCsAAAspelRlbGVyaWsuV2ViLlVJLkdyaWRDaGlsZExvYWRNb2RlLCBUZWxlcmlrLldlYi5VSSwgVmVyc2lvbj0yMDEzLjMuMTExNC40MCwgQ3VsdHVyZT1uZXV0cmFsLCBQdWJsaWNLZXlUb2tlbj0xMjFmYWU3ODE2NWJhM2Q0ATwrAAcACyl1VGVsZXJpay5XZWIuVUkuR3JpZEVkaXRNb2RlLCBUZWxlcmlrLldlYi5VSSwgVmVyc2lvbj0yMDEzLjMuMTExNC40MCwgQ3VsdHVyZT1uZXV0cmFsLCBQdWJsaWNLZXlUb2tlbj0xMjFmYWU3ODE2NWJhM2Q0ARYCHgRfZWZzZGQWBB4KRGF0YU1lbWJlcmUeBF9obG0LKwQBZGZkAgkPZBYCZg9kFgJmD2QWIAIBD2QWBAIDDzwrAAgAZAIFDzwrAAgAZAIDD2QWBAIDDzwrAAgAZAIFDzwrAAgAZAIFD2QWAgIDDzwrAAgAZAIHD2QWBAIDDzwrAAgAZAIFDzwrAAgAZAIJD2QWBAIDDzwrAAgAZAIFDzwrAAgAZAILD2QWBgIDDxQrAAI8KwAIAGRkAgUPFCsAAjwrAAgAZGQCBw8UKwACPCsACABkZAIND2QWBgIDDxQrAAI8KwAIAGRkAgUPFCsAAjwrAAgAZGQCBw8UKwACPCsACABkZAIPD2QWAgIDDxQrAAI8KwAIAGRkAhEPZBYGAgMPPCsACABkAgUPPCsACABkAgcPPCsACABkAhMPZBYGAgMPPCsACABkAgUPPCsACABkAgcPPCsACABkAhUPZBYCAgMPPCsACABkAhcPZBYGAgMPPCsACABkAgUPPCsACABkAgcPPCsACABkAhkPPCsADgIAFCsAAmQXAQUIUGFnZVNpemUCBQEWAhYLZGRlFCsAAAsrBAE8KwAHAAsrBQEWAh8FZGQWBB8GZR8HCysEAWRmZAIbDzwrAA4CABQrAAJkFwEFCFBhZ2VTaXplAgUBFgIWC2RkZRQrAAALKwQBPCsABwALKwUBFgIfBWRkFgQfBmUfBwsrBAFkZmQCHQ88KwAOAgAUKwACZBcBBQhQYWdlU2l6ZQIFARYCFgtkZGUUKwAACysEATwrAAcACysFARYCHwVkZBYEHwZlHwcLKwQBZGZkAiMPPCsADgIAFCsAAmQXAQUIUGFnZVNpemUCBQEWAhYLZGRlFCsAAAsrBAE8KwAHAAsrBQEWAh8FZGQWBB8GZR8HCysEAWRmZAILD2QWAmYPZBYCZg9kFgICAQ88KwAOAgAUKwACZBcBBQhQYWdlU2l6ZQIFARYCFgtkZGUUKwAACysEATwrAAcACysFARYCHwVkZBYEHwZlHwcLKwQBZGZkZIULy2JISPTzELAGqWDdBkCVyvvKIjo/wm/iG9PT1dlU",
`__VIEWSTATEGENERATOR` = "CA0B0334",
`__PREVIOUSPAGE` = "jGgYHmJ3-6da6PzGl9Py8IDr-Zzb75YxIFpHMz4WQ6iQEyTbjWaujGRHZU-1fqkJcMyvpGRkWGStWuj7Uf3NYv8Wi0KSCVwn435kijCN2fM1",
`__ASYNCPOST` = "true",
`ctl00$Content_FormContent$uiPretraziButton` = "Pretraži"),
encode = "form") -> res
You can see the result of that via:
content(res, as="text") # returns raw HTML
or
content(res, as="parsed") # returns something you can use with `rvest` / `xml2`
Unfortunately, this is yet another useless ASP.NET WebForms site of the sort that "eGov" projects around the world have bought into as a good thing to do. That means you have to figure out by trial and error which of those parameters are actually necessary, since it is different on virtually every site. I tried a minimal set to no avail.
You may even have to issue a GET request to the main site first to establish a session.
But this should get you going in the right direction.
