telethon.TelegramClient not working when called from a Dash callback

I am kind of a newbie with Dash, and I'm trying to call a function that creates a Telethon TelegramClient instance and sends a message when triggered by a Dash callback. It looks like the line

client = TelegramClient(phone_number, api, hash)

runs forever without any error message, but when I run it outside the Dash app, as a plain Python script, it works fine. Does anyone have an idea how to make it work?
Here is my code:
helper.py:
layout = html.Div(
    [
        html.Div(
            dbc.NavbarSimple(
                children=[
                    dbc.NavItem(dbc.NavLink("Telegram User and Localisation", href='/telegram-exp')),
                    dbc.DropdownMenu(
                        children=[
                            dbc.DropdownMenuItem("More pages", header=True),
                        ],
                        nav=True,
                        in_navbar=True,
                        label="Tools"
                    ),
                ],
                brand='test',
                color='primary',
                dark=True
            )
        ),
        dbc.Row(
            [
                dbc.Col(html.Div(
                    [
                        dbc.Input(
                            id='message',
                            placeholder='message',
                            n_submit=0,
                        ),
                        html.P(id='text'),
                    ],
                )),
            ]
        ),
    ]
)
@app.callback(
    Output('text', 'children'),
    Input('message', 'n_submit'),
    State('message', 'value'),
    prevent_initial_call=True
)
def update_loc(inpt, stat):
    return send_message(stat)
send.py:

def send_message(msg):
    client = TelegramClient(phone_number, api_ID, API_hash)

    async def main():
        await client.send_message('me', msg)
        return msg

    with client:
        return client.loop.run_until_complete(main())
I changed the code to be more generic, but the main question is why TelegramClient runs forever without any error message, and how to solve it.
Thanks!
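
One plausible explanation, for anyone hitting the same hang: Dash runs callbacks in Flask worker threads, and a worker thread has no asyncio event loop for Telethon to attach to. Below is a minimal sketch of send.py that creates a dedicated loop inside the callback thread (untested against this app; phone_number, api_ID and API_hash are reused from the snippet above):

import asyncio
from telethon import TelegramClient

def send_message(msg):
    # Dash callbacks run in worker threads; such a thread has no event
    # loop yet, so create and register one before constructing the client.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

    client = TelegramClient(phone_number, api_ID, API_hash)

    async def main():
        await client.send_message('me', msg)
        return msg

    with client:
        return client.loop.run_until_complete(main())

Note that the first run still has to go through Telethon's interactive login (a code sent to the phone), which will block a server process; running that once from a plain script so the session file already exists is one way around it.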

Related

FastAPI RuntimeError: Use params or add_pagination

I'm writing my second project with FastAPI, and I got this error.
For example, I have this code in my routers.users.py:

@router.get('/', response_model=Page[Users])
async def get_all_users(db: Session = Depends(get_db)):
    return paginate(db.query(models.User).order_by(models.User.id))
And it works. It has limit and page fields in the Swagger documentation.
I tried to write the same for routers.recipes.py, but in this case I got no pagination fields (limit, page) in Swagger. OK, I googled and found out that adding dependencies could help. Now I see the pagination parameters in Swagger, but the error is still the same.
routers.recipes:
@router.get('/', response_model=Page[PostRecipes], dependencies=[Depends(Params)])
async def get_all_recipes(db: Session = Depends(get_db)):
    return paginate(db.query(models.Recipe).order_by(models.Recipe.id))
pagination:
class Params(BaseModel, AbstractParams):
    page: int = Query(1, ge=1, description="Page number")
    limit: int = Query(50, ge=1, le=100, description="Page size")

    def to_raw_params(self) -> RawParams:
        return RawParams(
            limit=self.limit,
            offset=self.limit * (self.page - 1),
        )

class Page(BasePage[T], Generic[T]):
    page: conint(ge=1)  # type: ignore
    limit: conint(ge=1)  # type: ignore

    __params_type__ = Params

    @classmethod
    def create(
        cls,
        items: Sequence[T],
        total: int,
        params: AbstractParams,
    ) -> Page[T]:
        if not isinstance(params, Params):
            raise ValueError("Page should be used with Params")
        return cls(
            total=total,
            items=items,
            page=params.page,
            limit=params.limit,
        )

__all__ = [
    "Params",
    "Page",
]
So, does anyone have ideas about it?
According to the docs, you have to pass the params explicitly; your call should look like paginate(iterable, params).
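
For what it's worth, here is a minimal sketch of that change against the routers.recipes handler above (assuming fastapi-pagination, where paginate accepts the resolved params as its second argument; Depends() with no arguments makes FastAPI build Params from the query string):

@router.get('/', response_model=Page[PostRecipes])
async def get_all_recipes(params: Params = Depends(), db: Session = Depends(get_db)):
    # pass the params into paginate explicitly instead of only listing
    # them in `dependencies`, which never hands them to the paginator
    return paginate(db.query(models.Recipe).order_by(models.Recipe.id), params)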

Read nested JSON fields in Airflow?

I have an API that returns some nested JSON data that I would like Airflow to read. However, Airflow gives me an error saying that the nested fields are NULL. These fields are not NULL, and I can see the JSON data in any manual GET request I make to the API.
How do I modify my pipeline in order to read these nested fields?
My API returns a JSON object like:
{
    "email": "ronald@mcdonald.com",
    "first_name": "ronald",
    "last_name": "mcdonald",
    "permissions": {
        "make_burgers": true,
        "make_icecream": false
    }
}
My airflow pipeline object looks like:
class StaffPipeline(_DefaultPipeline):
    source_url = f'{config.MCDONALDS_BASE_URL}/staff'
    table_config = TableConfig(
        table_name='mcdonalds__staff',
        field_mapping=[
            ('email', sa.Column('email', sa.Text)),
            ('first_name', sa.Column('first_name', sa.Text)),
            ('last_name', sa.Column('last_name', sa.Text)),
            ('make_burgers', sa.Column('make_burgers', sa.Boolean)),
            ('make_icecream', sa.Column('make_icecream', sa.Boolean)),
        ],
        indexes=(LateIndex("email"), LateIndex("last_name")),
    )
My error message when trying to run this pipeline:
raise UnusedColumnError(error)
mcdonalds_data.operators.db_tables.UnusedColumnError: Column mcdonalds__staff_123456789.make_burgers only contains NULL values
Thank you!
The answer is that you have to use a nested structure in Airflow, like so:
class StaffPipeline(_DefaultPipeline):
    source_url = f'{config.MCDONALDS_BASE_URL}/staff'
    table_config = TableConfig(
        table_name='mcdonalds__staff',
        field_mapping=[
            ('email', sa.Column('email', sa.Text)),
            ('first_name', sa.Column('first_name', sa.Text)),
            ('last_name', sa.Column('last_name', sa.Text)),
            (
                ("permissions", "make_burgers"),
                sa.Column('make_burgers', sa.Boolean),
            ),
            (
                ("permissions", "make_icecream"),
                sa.Column('make_icecream', sa.Boolean),
            ),
        ],
        indexes=(LateIndex("email"), LateIndex("last_name")),
    )
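
A hypothetical illustration of what the tuple key buys you (resolve is an invented name here; the real lookup lives inside the pipeline framework): a string key reads a top-level field, while a tuple key is walked into the nested object one level at a time.

def resolve(record, path):
    # string keys read a top-level field; tuple keys descend level by level
    if isinstance(path, str):
        return record.get(path)
    value = record
    for key in path:
        if not isinstance(value, dict):
            return None
        value = value.get(key)
    return value

staff = {"email": "ronald@mcdonald.com", "permissions": {"make_burgers": True}}
print(resolve(staff, ("permissions", "make_burgers")))  # True, not NULL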

How to return true if x data exists in JSON or CSV from API on Wordpress website

Is there any easy method to call an API from a Wordpress website and return true or false, depending on whether some data is there?
Here is the API:
https://api.covalenthq.com/v1/137/address/0x3FEb1D627c96cD918f2E554A803210DA09084462/balances_v2/?&format=JSON&nft=true&no-nft-fetch=true&key=ckey_docs
Here is the JSON:
{
    "data": {
        "address": "0x3feb1d627c96cd918f2e554a803210da09084462",
        "updated_at": "2021-11-13T23:25:27.639021367Z",
        "next_update_at": "2021-11-13T23:30:27.639021727Z",
        "quote_currency": "USD",
        "chain_id": 137,
        "items": [
            {
                "contract_decimals": 0,
                "contract_name": "PublicServiceKoalas",
                "contract_ticker_symbol": "PSK",
                "contract_address": "0xc5df71db9055e6e1d9a37a86411fd6189ca2dbbb",
                "supports_erc": [
                    "erc20"
                ],
                "logo_url": "https://logos.covalenthq.com/tokens/137/0xc5df71db9055e6e1d9a37a86411fd6189ca2dbbb.png",
                "last_transferred_at": "2021-11-13T09:45:36Z",
                "type": "nft",
                "balance": "0",
                "balance_24h": null,
                "quote_rate": 0.0,
                "quote_rate_24h": null,
                "quote": 0.0,
                "quote_24h": null,
                "nft_data": null
            }
        ],
        "pagination": null
    },
    "error": false,
    "error_message": null,
    "error_code": null
}
I want to check whether "PSK" exists in contract_ticker_symbol and, if it does and its "balance" is > 0, return true.
Is there a painless method? Because I'm not a programmer...
The Python requests library can handle this. You'll have to install it first with pip (Python's package installer).
I also used a website called JSON Parser Online to see what was going on with the data first, so that I could make sense of it in my code:
import requests

def main():
    url = "https://api.covalenthq.com/v1/137/address/0x3FEb1D627c96cD918f2E554A803210DA09084462/balances_v2/?&format" \
          "=JSON&nft=true&no-nft-fetch=true&key=ckey_docs"
    try:
        response = requests.get(url).json()
        for item in response['data']['items']:
            # First, find 'PSK' in the list
            if item['contract_ticker_symbol'] == "PSK":
                # Now, check the balance (the API returns it as a string)
                return int(item['balance']) > 0
    except requests.ConnectionError:
        print("Exception")

if __name__ == "__main__":
    print(main())
This is what is going on:

- I am pulling all of the data from the API.
- I am using a try/except clause because the code needs to handle the case where it can't connect to the site.
- I am looping through all of the 'items' to find the item whose contract ticker symbol is 'PSK'.
- I am checking the balance of that item and returning the logic that you wanted.

The script runs itself at the end, but you can always rename this function and have some other code call it.
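
If you need this check for more than one token, a small variation of the same idea (same URL and assumptions as above) wraps it in a reusable function:

import requests

API_URL = ("https://api.covalenthq.com/v1/137/address/"
           "0x3FEb1D627c96cD918f2E554A803210DA09084462/balances_v2/"
           "?&format=JSON&nft=true&no-nft-fetch=true&key=ckey_docs")

def has_positive_balance(ticker):
    # Return True only if `ticker` is present with a balance above zero.
    try:
        response = requests.get(API_URL).json()
    except requests.ConnectionError:
        return False
    for item in response['data']['items']:
        if item['contract_ticker_symbol'] == ticker:
            return int(item['balance']) > 0
    return False

print(has_positive_balance("PSK"))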

FastAPI File(s) must be put before form in function parameters

I have an endpoint that takes a file and a string parameter that I pass through a form body. But I noticed this while debugging:
import uvicorn
from fastapi import FastAPI, File, Form

app = FastAPI()

@app.post('/test')
def test(test_item: str = Form(...), test_file: bytes = File(...)):
    return {
        "test_item": test_item,
        "test_file_len": len(test_file),
        "test_file_contents": test_file.decode('utf-8')
    }

if __name__ == '__main__':
    uvicorn.run(app, host="0.0.0.0", port=8000)
using this simple curl command, with test_file.txt containing some text:

curl localhost:8000/test -X POST -F test_file=@"test_file.txt" -F test_item="test"
didn't work with this error:
{
    "detail": [
        {
            "loc": [
                "body",
                "test_file"
            ],
            "msg": "byte type expected",
            "type": "type_error.bytes"
        }
    ]
}
but interestingly, this did work:
import uvicorn
from fastapi import FastAPI, File, Form

app = FastAPI()

@app.post('/test')
def test(test_file: bytes = File(...), test_item: str = Form(...)):
    return {
        "test_item": test_item,
        "test_file_len": len(test_file),
        "test_file_contents": test_file.decode('utf-8')
    }

if __name__ == '__main__':
    uvicorn.run(app, host="0.0.0.0", port=8000)
the only difference being that the File parameter comes before the Form parameter. Does anybody know why that happens? It looks like files are required to be declared before form fields. Maybe the file gets discarded when Form parses the HTTP form body. I didn't see anything about this in the FastAPI documentation for Form and File.
This was working in FastAPI 0.43.0.
This is a regression introduced in https://github.com/tiangolo/fastapi/commit/ab2b86fe2ce8fe15e91aaec179438e24ff7b7ed0
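
If reordering the parameters is not an option, one thing worth trying (an untested sketch, not a confirmed fix for this regression) is FastAPI's higher-level UploadFile, which wraps the upload in an object instead of coercing it to bytes during form parsing:

import uvicorn
from fastapi import FastAPI, File, Form, UploadFile

app = FastAPI()

@app.post('/test')
async def test(test_item: str = Form(...), test_file: UploadFile = File(...)):
    # read the file contents explicitly instead of receiving raw bytes
    contents = await test_file.read()
    return {
        "test_item": test_item,
        "test_file_len": len(contents),
        "test_file_contents": contents.decode('utf-8')
    }

if __name__ == '__main__':
    uvicorn.run(app, host="0.0.0.0", port=8000)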

Scrapy: Can't login to dual-login page: onestop.jdsu.com

I am developing a scraper for internal use and evaluation of my company's partner website onestop.jdsu.com. The website is actually an ASPX site.
I can't get Scrapy to log in to the page: https://onestop.jdsu.com/_layouts/JDSU.OneStop/Login.aspx?ReturnUrl=%2f_layouts%2fAuthenticate.aspx%3fSource%3d%252F&Source=%2F
There are actually two login methods on the page, and I think I'm having trouble distinguishing between them in the Scrapy spider. The one I'm most interested in is the "partner login", although logging in with the employee login, which is actually a script that displays a drop-down login window, would be fine too.
I've used "loginform" to extract the relevant fields from both forms. Unfortunately, no combination of the relevant POST data seems to make a difference. Perhaps I'm not clicking the button on the partner form ("ctl00$PlaceHolderMain$loginControl$login", "")?
Also, the "Login failed" message does not come through even when I know the login must have failed.
The spider below ignores "__VIEWSTATE" and "__EVENTVALIDATION" because including them doesn't make a difference, and they don't seem to have anything to do with the partner login in the page's HTML.
Any help would be very much appreciated!
LOGINFORM TEST OUTPUT
python ./test.py https://onestop.jdsu.com/_layouts/JDSU.OneStop/Login.aspx?ReturnUrl=%2f_layouts%2fAuthenticate.aspx%3fSource%3d%252F&Source=%2F
[1] 1273
peter-macbook:_loginform-master peter$ [
    "https://onestop.jdsu.com/_layouts/JDSU.OneStop/Login.aspx?ReturnUrl=%2f_layouts%2fAuthenticate.aspx%3fSource%3d%252F",
    [
        [
            [
                "__VIEWSTATE",
                "/wEPDwUKMTEzNDkwMDAxNw9kFgJmD2QWAgIBD2QWAgIDD2QWCAIDDxYCHgdWaXNpYmxlaGQCBQ8WAh8AaGQCCw9kFgYCAQ8WAh4EaHJlZgUhL193aW5kb3dzL2RlZmF1bHQuYXNweD9SZXR1cm5Vcmw9ZAIDD2QWAgIDDw8WAh8AaGRkAgUPFgIfAGhkAg0PFgIfAGgWAgIBDw8WAh4ISW1hZ2VVcmwFIS9fbGF5b3V0cy8xMDMzL2ltYWdlcy9jYWxwcmV2LnBuZ2RkZP7gVj0vs2N5c/DzKfAu4DwrFihP"
            ],
            [
                "__EVENTVALIDATION",
                "/wEWBALlpOFKAoyn3a4JAuj7pusEAsXI9Y8HY+WYdEUkWKmn7tesA+BODBefeYE="
            ],
            [
                "ctl00$PlaceHolderMain$loginControl$UserName",
                "USER"
            ],
            [
                "ctl00$PlaceHolderMain$loginControl$password",
                "PASS"
            ],
            [
                "ctl00$PlaceHolderMain$loginControl$login",
                ""
            ]
        ],
        "https://onestop.jdsu.com/_layouts/JDSU.OneStop/Login.aspx?ReturnUrl=%2f_layouts%2fAuthenticate.aspx%3fSource%3d%252F",
        "POST"
    ]
]
SCRAPY SPIDER FOR PARTNER LOGIN
import scrapy
from tutorial.items import WaveReadyItem
#from scrapy import log
#from scrapy.shell import inspect_response

class WaveReadySpider(scrapy.Spider):
    name = "onestop_home-page-3"
    allowed_domains = ["https://onestop.jdsu.com"]
    start_urls = [
        "https://onestop.jdsu.com/_layouts/JDSU.OneStop/Login.aspx?ReturnUrl=%2f_layouts%2fAuthenticate.aspx%3fSource%3d%252F&Source=%2F",
        "https://onestop.jdsu.com/Products/network-systems/Pages/default.aspx"
    ]

    def parse(self, response):
        return scrapy.FormRequest.from_response(
            response,
            formdata={'ctl00$PlaceHolderMain$loginControl$UserName': 'MY-USERID',
                      'ctl00$PlaceHolderMain$loginControl$password': 'MY-PASSWD',
                      'ctl00$PlaceHolderMain$loginControl$login': ''},
            callback=self.after_login
        )

    def after_login(self, response):
        # check login succeeded before going on
        if "Invalid ID or Password" in response.body:
            self.log("Login failed", level=log.ERROR)
            return

    def parse(self, response):
        #=============================================================================
        # HOME PAGE: PICK UP OTHER LANDING PAGES IN CENTER COLUMN
        #=============================================================================
        etc.
I don't know why yours fails, but here is how I use "loginform":

def parse(self, response):
    args, url, method = fill_login_form(response.url, response.body, self.username, self.password)
    return FormRequest(url, method=method, formdata=args, callback=self.after_login)

The fill_login_form method will try its best to locate the correct login form, and then it returns everything needed to perform a login. If you fill in the form manually, something may be missed.
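
For completeness, here is a minimal spider skeleton built around that parse method (a sketch assuming pip install loginform; the username/password attributes and the failure-banner check are carried over from the question):

import scrapy
from scrapy.http import FormRequest
from loginform import fill_login_form

class LoginSpider(scrapy.Spider):
    name = "onestop_login"
    start_urls = [
        "https://onestop.jdsu.com/_layouts/JDSU.OneStop/Login.aspx?ReturnUrl=%2f_layouts%2fAuthenticate.aspx%3fSource%3d%252F&Source=%2F"
    ]
    username = "MY-USERID"
    password = "MY-PASSWD"

    def parse(self, response):
        # let loginform pick the right form and fill in the credentials
        args, url, method = fill_login_form(response.url, response.body,
                                            self.username, self.password)
        return FormRequest(url, method=method, formdata=args,
                           callback=self.after_login)

    def after_login(self, response):
        # bail out if the failure banner is present
        if b"Invalid ID or Password" in response.body:
            self.logger.error("Login failed")
            return
        # continue scraping from here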
