Why do I get the "Interaction Failed" message after clicking any button on an embed?

I recently tried to create a command with discord_components Buttons to set up a logs channel and a welcome channel. The bot asks whether a logs or welcome channel already exists, and if the user answers "No", the bot creates it. Normally I should get the 'Finish' message at the end of the command, but instead I get the Interaction Failed error.
import os

import discord
from discord.ext.commands import bot_has_permissions, has_permissions
from discord_components import Button, ButtonStyle, InteractionType

@bot.command(name="configserver")
@has_permissions(administrator=True)
@bot_has_permissions(administrator=True)
async def configserver(ctx):
    def check(res):
        return ctx.author == res.user and res.channel == ctx.channel

    try:
        file = open("file", "x")
    except FileExistsError:
        os.remove("file")
        file = open("file", "a")
    guild: discord.Guild = ctx.guild
    embed_button = await ctx.send(embed=discord.Embed(
        title="Configuration",
        description="There is a **logs** channel ?",
        color=get_color(0x3ef76f, 0xe8f73e, 0xf73e3e)),
        components=[[
            Button(style=ButtonStyle.green, label="Yes"),
            Button(style=ButtonStyle.red, label="No")
        ]])
    # For channel logs
    res = await bot.wait_for("button_click", check=check)
    if res.component.label == "Yes":
        await res.respond(
            content=await embed_button.edit(embed=discord.Embed(
                title="Configuration",
                description="There is a **welcome** channel ?",
                color=get_color(0x3ef76f, 0xe8f73e, 0xf73e3e)),
                components=[[
                    Button(style=ButtonStyle.green, label="Yes"),
                    Button(style=ButtonStyle.red, label="No")
                ]])
        )
        res = await bot.wait_for("button_click", check=check)
        await res.respond(
            type=InteractionType.ChannelMessageWithSource,
            content='Finish'
        )
    elif res.component.label == "No":
        permissions_logs = {
            guild.default_role: discord.PermissionOverwrite(read_messages=False),
        }
        channel_logs: discord.TextChannel = await guild.create_text_channel('logs', overwrites=permissions_logs)
        print(channel_logs.id)
        file.write(f"channel_logs = {channel_logs.id}\n")
        await res.respond(
            content=await embed_button.edit(embed=discord.Embed(
                title="Configuration",
                description="There is a **welcome** channel ?",
                color=get_color(0x3ef76f, 0xe8f73e, 0xf73e3e)),
                components=[[
                    Button(style=ButtonStyle.green, label="Yes"),
                    Button(style=ButtonStyle.red, label="No")
                ]])
        )
        # For the welcome channel
        res = await bot.wait_for("button_click", check=check)
        if res.component.label == "Yes":
            print('Yes')
            await res.respond(
                type=InteractionType.ChannelMessageWithSource,
                content='Finish'
            )
        elif res.component.label == "No":
            print('No')
            permissions_welcome = {
                # send_messages, not write_messages (which discord.py does not accept)
                guild.default_role: discord.PermissionOverwrite(send_messages=False),
            }
            channel_welcome = await guild.create_text_channel('welcome', overwrites=permissions_welcome)
            print(channel_welcome.id)
            file.write(f"channel_welcome = {channel_welcome.id}\n")
            await res.respond(
                type=InteractionType.ChannelMessageWithSource,
                content='Finish'
            )
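A note on the likely failure mode (my reading of the snippet, not confirmed): discord.Message.edit() returns None, so res.respond(content=await embed_button.edit(...)) passes None as the response content and the click is never properly acknowledged; Discord shows "Interaction Failed" whenever a button press isn't acknowledged in time. A minimal sketch of the usual pattern, assuming the installed discord-components version exposes InteractionType.DeferredUpdateMessage:

res = await bot.wait_for("button_click", check=check)
# Acknowledge the click first, within Discord's response window.
await res.respond(type=InteractionType.DeferredUpdateMessage)
# Then edit the original message as a separate step.
await embed_button.edit(
    embed=discord.Embed(title="Configuration",
                        description="There is a **welcome** channel ?"),
    components=[[Button(style=ButtonStyle.green, label="Yes"),
                 Button(style=ButtonStyle.red, label="No")]],
)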

Related

Taking input as a bot and setting it to a variable (Telethon)

I'm making a bot and I have a question. This is the code:
@bot.on(events.CallbackQuery)
async def handler(event):
    if event.data == b"3":
        await event.respond("x", buttons=x)
    if event.data == b"7":
        await event.respond("x", buttons=x)
    if event.data == b"9":
        await event.respond("insert x")

@bot.on(events.NewMessage)
async def main(event):
    messaggio =

and I have no idea how to set "messaggio" to x. Here is my current attempt:
@bot.on(events.CallbackQuery)
async def handler(event):
    if event.data == b"3":
        await event.respond("x", buttons=x)
    if event.data == b"7":
        await event.respond("x?", buttons=x)
    if event.data == b"9":
        await event.respond("insert x")

@bot.on(events.NewMessage(chats="x"))
async def handler(event):
    print(str("messaggio: ") + event.text)
    messaggio = event.text
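One common way to bind the next message to a variable (a sketch, not the asker's code; the pending_input set and the confirmation reply are illustrative): record which chat is awaiting input when the button is pressed, then pick the text up in the NewMessage handler.

from telethon import events

# `bot` is the TelegramClient from the question.
pending_input = set()  # chat ids we expect a reply from

@bot.on(events.CallbackQuery)
async def on_button(event):
    if event.data == b"9":
        await event.respond("insert x")
        pending_input.add(event.chat_id)  # remember we asked this chat

@bot.on(events.NewMessage)
async def on_message(event):
    if event.chat_id in pending_input:
        pending_input.discard(event.chat_id)
        messaggio = event.text  # the user's reply, bound to a variable
        await event.respond("messaggio: " + messaggio)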

SQLAlchemy 1.4.0b1 AsyncSession issue

I'm using SQLAlchemy 1.4.0b1's AsyncSession to update a Postgres db with asyncpg 0.21.0. The code below aims to update objects and add new objects in response to various incoming Redis stream messages.
The save_revised coroutine (an update) is working fine, and so is the session.add part of the td_move coroutine. However, the update part of td_move, at the bottom of the function (starting from if this_train_id and msg.get('from') in finals[crossing]), only works intermittently: I'm getting some db updates but only ~1/3 or so of the log messages indicating that an update is wanted.
Can anyone suggest what the problem(s) could be, please?
import asyncio
import os
from datetime import datetime

import aioredis
from sqlalchemy import update
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine

# get_db_data, is_within_minutes, dt_from_timestamp, Td_Ca_Cc and logger are
# defined elsewhere in the project.

async def main():
    logger.info(f"db_updater starting {datetime.now().strftime('%H:%M:%S')}")
    engine = create_async_engine(os.getenv('ASYNC_DB_URL'), future=True)
    async with AsyncSession(engine) as session:
        crossings, headcodes, lean_params, finals, active_trains, train_ids, berthtimes, hc_types = await get_db_data(logger)  # noqa: E501
        pool = await aioredis.create_redis_pool(('redis', 6379), db=0, password=os.getenv('REDIS_PW'), encoding='utf-8')
        last_id = '$'
        while True:
            all_msgs = await pool.xread(['del_hc_s', 'xing_revised', 'all_td', 'add_hc_s'], latest_ids=[last_id, last_id, last_id, last_id])  # noqa: E501
            for stream_name, msg_id, msg in all_msgs:
                message = dict(msg)
                crossing = message.get('crossing')
                if stream_name == 'all_td':
                    await td_move(message, train_ids, active_trains, finals, lean_params, session)
                elif stream_name == 'xing_revised':
                    await save_revised(message, lean_params[crossing], session)

async def save_revised(msg, params, session):
    train_id = msg.get('train_id')
    # today_class is a SQLA model class from declarative_base()
    today_class = params['today_class']
    rev_time = datetime.fromtimestamp(int(msg.get('revised')))
    stmt = update(today_class).where(today_class.train_id == train_id).\
        values(xing_revised=rev_time).\
        execution_options(synchronize_session="fetch")
    await session.execute(stmt)
    if msg.get('revised_ten') != 'X':
        stmt2 = update(today_class).where(today_class.train_id == train_id).\
            values(xing_revised_ten=rev_time).\
            execution_options(synchronize_session="fetch")
        await session.execute(stmt2)
    await session.commit()

async def td_move(msg, train_ids, active_trains, finals, params, session):
    crossing = msg.get('crossing')
    descr = msg.get('descr')
    if crossing:
        this_train_id = [s for s in train_ids[crossing] if descr in s]
        if this_train_id:
            this_train_id = this_train_id[0]
        else:
            return
        if this_train_id and active_trains[crossing].get(this_train_id) and (
                is_within_minutes(30, active_trains[crossing].get(this_train_id))):
            # Td_Ca_Cc is a SQLA model class from declarative_base()
            td = Td_Ca_Cc(
                msg_type=msg.get('msg_type'),
                descr=msg.get('descr'),
                traintype=active_trains[crossing].get(this_train_id).get('train_type'),
                from_berth=msg.get('from'),
                to_berth=msg.get('to'),
                tdtime=dt_from_timestamp(msg.get('time')),
                seconds=0,
                area_id=msg.get('area_id'),
                updated=datetime.now(),
                crossing=crossing
            )
            session.add(td)
        if this_train_id and msg.get('from') in finals[crossing]:
            today_class = params[crossing]['today_class']
            stmt = update(today_class).where(today_class.train_id == this_train_id).\
                values(xing_actual=datetime.now(), cancel_time='XXX').\
                execution_options(synchronize_session="fetch")
            await session.execute(stmt)
            logger.info(f"{crossing} {msg.get('descr')} passed {datetime.now().strftime('%H:%M:%S')}")
        await session.commit()

if __name__ == '__main__':
    asyncio.run(main())
For future reference: the problem was a (synchronous) logging call. I removed it (and will add an async-friendly logging setup), and the modified code is now working fine.
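For illustration, a minimal stdlib sketch of keeping log I/O off the event loop with QueueHandler/QueueListener (the logger name and handler choice are placeholders): logging calls then only enqueue a record, and a background thread performs the blocking write.

import logging
import logging.handlers
import queue

log_queue = queue.SimpleQueue()
target = logging.StreamHandler()  # the real destination: file, stream, ...
listener = logging.handlers.QueueListener(log_queue, target)
listener.start()  # a background thread performs the blocking I/O

logger = logging.getLogger("db_updater")
logger.setLevel(logging.INFO)
logger.addHandler(logging.handlers.QueueHandler(log_queue))
# logger.info(...) now just puts a record on the queue.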

Is it possible to randomly sample YouTube comments with YouTube API V3?

I have been trying to download all the YouTube comments on popular videos using python requests, but it throws the following error after about a quarter of the total comments:
{'error': {'code': 400, 'message': "The API server failed to successfully process the request. While this can be a transient error, it usually indicates that the request's input is invalid. Check the structure of the commentThread resource in the request body to ensure that it is valid.", 'errors': [{'message': "The API server failed to successfully process the request. While this can be a transient error, it usually indicates that the request's input is invalid. Check the structure of the commentThread resource in the request body to ensure that it is valid.", 'domain': 'youtube.commentThread', 'reason': 'processingFailure', 'location': 'body', 'locationType': 'other'}]}}
I found this thread detailing the same issue, and it seems it is not possible to download all the comments on popular videos.
This is my code:
import argparse
import urllib
import requests
import json
import time

from requests.exceptions import ChunkedEncodingError

start_time = time.time()

class YouTubeApi():

    YOUTUBE_COMMENTS_URL = 'https://www.googleapis.com/youtube/v3/commentThreads'
    comment_counter = 0

    with open("API_keys.txt", "r") as f:
        key_list = f.readlines()
    key_list = [key.strip('\n') for key in key_list]

    def format_comments(self, results, likes_required):
        comments_list = []
        try:
            for item in results["items"]:
                comment = item["snippet"]["topLevelComment"]
                likes = comment["snippet"]["likeCount"]
                if likes < likes_required:
                    continue
                author = comment["snippet"]["authorDisplayName"]
                text = comment["snippet"]["textDisplay"]
                comment_str = "Comment by {}:\n \"{}\"\n\n".format(author, text)
                comment_str = comment_str.encode('ascii', 'replace').decode()
                comments_list.append(comment_str)
                self.comment_counter += 1
                print("Comments downloaded:", self.comment_counter, end="\r")
        except KeyError:
            print(results)
        return comments_list

    def get_video_comments(self, video_id, likes_required):
        # Rotate API keys as quota is consumed.
        if self.comment_counter <= 900000:
            key = self.key_list[0]
        elif self.comment_counter <= 1800000:
            key = self.key_list[1]
        elif self.comment_counter <= 2700000:
            key = self.key_list[2]
        elif self.comment_counter <= 3600000:
            key = self.key_list[3]
        elif self.comment_counter <= 4500000:
            key = self.key_list[4]
        params = {
            'part': 'snippet,replies',
            'maxResults': 100,
            'videoId': video_id,
            'textFormat': 'plainText',
            'key': key
        }
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'
        }
        try:
            # data = self.openURL(self.YOUTUBE_COMMENTS_URL, params)
            comments_data = requests.get(self.YOUTUBE_COMMENTS_URL, params=params, headers=headers)
        except ChunkedEncodingError:
            tries = 5
            print("Chunked Error. Retrying...")
            for n in range(tries):
                try:
                    print("Trying", n + 1, "times")
                    # NOTE: `session` and `data` are not defined anywhere in this snippet.
                    response = session.post("https://www.youtube.com/comment_service_ajax", params=params, data=data, headers=headers)
                    comments_data = json.loads(response.text)
                    break
                except ChunkedEncodingError as c:
                    print(c)
        results = comments_data.json()
        nextPageToken = results.get("nextPageToken")
        comments_list = []
        comments_list += self.format_comments(results, likes_required)
        while nextPageToken:
            params.update({'pageToken': nextPageToken})
            try:
                comments_data = requests.get(self.YOUTUBE_COMMENTS_URL, params=params, headers=headers)
            except ChunkedEncodingError:
                tries = 5
                print("Chunked Error. Retrying...")
                for n in range(tries):
                    try:
                        print("Trying", n + 1, "times")
                        response = session.post("https://www.youtube.com/comment_service_ajax", params=params, data=data, headers=headers)
                        comments_data = json.loads(response.text)
                        break
                    except ChunkedEncodingError as c:
                        print(c)
            results = comments_data.json()
            nextPageToken = results.get("nextPageToken")
            comments_list += self.format_comments(results, likes_required)
        return comments_list

    def get_video_id_list(self, filename):
        try:
            with open(filename, 'r') as file:
                URL_list = file.readlines()
        except FileNotFoundError:
            exit("File \"" + filename + "\" not found")
        id_list = []
        for url in URL_list:
            if url == "\n":  # ignore empty lines
                continue
            if url[-1] == '\n':  # strip the trailing newline
                url = url[:-1]
            if url.find('=') != -1:  # extract the video id
                id_list.append(url[url.find('=') + 1:])
            else:
                print("Wrong URL")
        return id_list

def main():
    yt = YouTubeApi()
    parser = argparse.ArgumentParser(add_help=False, description=("Download youtube comments from many videos into txt file"))
    required = parser.add_argument_group("required arguments")
    optional = parser.add_argument_group("optional arguments")
    # This argument definition was garbled in the original post; reconstructed
    # as the usual API-key option whose help text ends with the credentials URL.
    required.add_argument("--key", '-k', help="Your API key from here: https://console.developers.google.com/apis/credentials")
    optional.add_argument("--likes", '-l', help="The amount of likes a comment needs to be saved", type=int)
    optional.add_argument("--input", '-i', help="URL list file name")
    optional.add_argument("--output", '-o', help="Output file name")
    optional.add_argument("--help", '-h', help="Help", action='help')
    args = parser.parse_args()
    # --------------------------------------------------------------------- #
    likes = 0
    if args.likes:
        likes = args.likes
    input_file = "URL_list.txt"
    if args.input:
        input_file = args.input
    output_file = "Comments.txt"
    if args.output:
        output_file = args.output
    video_ids = yt.get_video_id_list(input_file)
    if not video_ids:
        exit("No URLs in input file")
    try:
        vid_counter = 0
        with open(output_file, "a") as f:
            for video_id in video_ids:
                vid_counter += 1
                print("Downloading comments for video ", vid_counter, ", id: ", video_id, sep='')
                comments = yt.get_video_comments(video_id, likes)
                if comments:
                    for comment in comments:
                        f.write(comment)
        print('\nDone!')
    except KeyboardInterrupt:
        exit("User Aborted the Operation")
    # --------------------------------------------------------------------- #

if __name__ == '__main__':
    main()
The next best method would be to randomly sample them. Does anyone know if this is possible with the API V3?
Even if the API returns a processingFailure error, you could still catch that (or any other API error, for that matter) to terminate your pagination loop gracefully. This way your script will provide the top-level comments that it fetched from the API prior to the first API error.
The error response provided by the YouTube Data API is (usually) of the following form:
{
  "error": {
    "errors": [
      {
        "domain": <string>,
        "reason": <string>,
        "message": <string>,
        "locationType": <string>,
        "location": <string>
      }
    ],
    "code": <integer>,
    "message": <string>
  }
}
Hence, you could have defined the following function:
def is_error_response(response):
    error = response.get('error')
    if error is None:
        return False
    print("API Error: "
          f"code={error['code']} "
          f"domain={error['errors'][0]['domain']} "
          f"reason={error['errors'][0]['reason']} "
          f"message={error['errors'][0]['message']!r}")
    return True
that you'll invoke after each statement of the form results = comments_data.json(). For the first occurrence of that statement, you'll have:
results = comments_data.json()
if is_error_response(results):
    return []
nextPageToken = results.get("nextPageToken")
For the second instance of that statement:
results = comments_data.json()
if is_error_response(results):
    return comments_list
nextPageToken = results.get("nextPageToken")
Notice that the function is_error_response above prints an error message on stdout when its argument is an API error response; this is so that the user of your script is informed about the API call failure.

Problem with Flutter .where and .orderBy queries against Firebase

I'm writing a Flutter application which uses the user's location to identify items around them. As it's only possible to do comparison queries against one field in Firebase, I'm using geohashes as per Frank van Puffelen's lecture on the subject.
I've got the initial query working fine:
qs = await Firestore.instance
    .collection('users')
    .where('geohash', isGreaterThanOrEqualTo: boundaryGeoHash1)
    .where('geohash', isLessThanOrEqualTo: boundaryGeoHash2)
    .getDocuments();
This returns all the people in the required geographic area, as expected. The problem is that I expect a high volume of users, so I want to limit the number of documents per query to a certain number - say 10.
When I run the query below (which I believed should do the trick), what actually seems to happen is that the collection is ordered by 'geohash', the first 10 documents are taken, and then only the records among those 10 that satisfy the .where part of the query are sent to me:
qs = await Firestore.instance
    .collection('users')
    .orderBy('geohash')
    .where('geohash', isGreaterThanOrEqualTo: boundaryGeoHash1)
    .where('geohash', isLessThanOrEqualTo: boundaryGeoHash2)
    .startAfterDocument(dS)
    .limit(10)
    .getDocuments();
// dS = last document from previous query
whereas what I want is for the query to always send me 10 results, all of which satisfy the .where part of the query.
Using the test database I've generated, the initial query returns 114 results. The second one finds 2, which leads me to my conclusion about what is happening.
Can what I'm trying to do be achieved? And if so, any clues as to what I'm doing wrong?
EDIT
As per Frank's request, dS is populated as follows:
if (qs != null) {
  dS = qs.documents[qs.documents.length - 1];
}
// dS is a global variable
EDIT 2
Here is the function which does the query on the database.
Future<List<CutDownUserProfile>> getReducedUserDataBetweenGeohashes(
    String boundaryGeoHash1, String boundaryGeoHash2, DocumentSnapshot dS, int downloadLimit) async {
  List<CutDownUserProfile> retn = List<CutDownUserProfile>();
  QuerySnapshot qs;
  if (dS != null) {
    print('ds != null');
    print('dS.userNumber = ' + dS.data['userNumber'].toString());
    print('dS.userID = ' + dS.data['userID']);
    print('dS.geohash = ' + dS.data['geohash']);
    print('dS.name = ' + dS.data['name']);
    qs = await Firestore.instance
        .collection('userHeaders')
        .orderBy('geohash')
        .where('geohash', isGreaterThanOrEqualTo: boundaryGeoHash1)
        .where('geohash', isLessThanOrEqualTo: boundaryGeoHash2)
        .startAfterDocument(dS)
        .limit(downloadLimit)
        .getDocuments();
  } else {
    print('ds == null');
    qs = await Firestore.instance
        .collection('userHeaders')
        .orderBy('geohash')
        .where('geohash', isGreaterThanOrEqualTo: boundaryGeoHash1)
        .where('geohash', isLessThanOrEqualTo: boundaryGeoHash2)
        .limit(downloadLimit)
        .getDocuments();
  }
  if (qs.documents.isNotEmpty) {
    print('through this! qs len = ' + qs.documents.length.toString());
    List<DocumentSnapshot> ds = qs.documents.toList();
    print('/////DS Len = ' + ds.length.toString());
    for (int i = 0; i < ds.length; i++) {
      CutDownUserProfile cDUP = CutDownUserProfile();
      cDUP.geoHash = ds[i].data['geohash'];
      Vector2 latLon = globals.decodeGeohash(cDUP.geoHash);
      double dist = getDistanceFromLatLonInKm(globals.localUser.homeLat, globals.localUser.homeLon, latLon.x, latLon.y);
      if (dist <= globals.localUser.maxDistance && dist <= ds[i].data['maxDist']) {
        CutDownUserProfile cDUP2 = CutDownUserProfile();
        cDUP2.userNumber = ds[i].data['userNumber'];
        cDUP2.userID = ds[i].data['userID'];
        cDUP2.geoHash = ds[i].data['geohash'];
        retn.add(cDUP2);
      }
    }
    if (qs != null) {
      globals.lastProfileSeen = qs.documents[qs.documents.length - 1];
    }
  } else {
    print('no results');
  }
  return retn;
}
The line print('/////DS Len = ' + ds.length.toString()); prints the length of the result set and, as mentioned earlier, it prints 2. Without the .orderBy, the .startAfterDocument(dS) and the .limit(downloadLimit), the code returns 114 results, which is what I expected. For completeness, here is the original code:
Future<List<CutDownUserProfile>> getReducedUserDataBetweenGeohashesALL(String boundaryGeoHash1, String boundaryGeoHash2) async {
  List<CutDownUserProfile> retn = List<CutDownUserProfile>();
  await Firestore.instance
      .collection('userHeaders')
      .where('geohash', isGreaterThanOrEqualTo: boundaryGeoHash1)
      .where('geohash', isLessThanOrEqualTo: boundaryGeoHash2)
      .getDocuments()
      .then((event) {
    if (event.documents.isNotEmpty) {
      List<DocumentSnapshot> ds = event.documents.toList(); // if it is a single document
      print('/////DS Len = ' + ds.length.toString());
      for (int i = 0; i < ds.length; i++) {
        CutDownUserProfile cDUP = CutDownUserProfile();
        cDUP.geoHash = ds[i].data['geohash'];
        Vector2 latLon = globals.decodeGeohash(cDUP.geoHash);
        double dist = getDistanceFromLatLonInKm(globals.localUser.homeLat, globals.localUser.homeLon, latLon.x, latLon.y);
        CutDownUserProfile cDUP2 = CutDownUserProfile();
        cDUP2.userNumber = ds[i].data['userNumber'];
        cDUP2.userID = ds[i].data['userID'];
        cDUP2.geoHash = ds[i].data['geohash'];
        retn.add(cDUP2);
      }
    } else {
      print('no results');
    }
  }).catchError((e) => print("error fetching data: $e"));
  return retn;
}
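For comparison, a sketch of the same range-filter, orderBy, limit, start-after pagination written with the google-cloud-firestore Python client (the bounds are placeholders, not values from the question). Firestore applies the filters and ordering before the limit, so each page should contain up to `limit` documents that satisfy the range:

from google.cloud import firestore

boundary1, boundary2 = "9q8y", "9q8z"  # placeholder geohash bounds
db = firestore.Client()
base = (
    db.collection("userHeaders")
    .where("geohash", ">=", boundary1)
    .where("geohash", "<=", boundary2)
    .order_by("geohash")
    .limit(10)
)

last = None
while True:
    query = base.start_after(last) if last is not None else base
    page = list(query.stream())
    if not page:
        break  # range exhausted
    for snap in page:
        print(snap.id, snap.get("geohash"))
    last = page[-1]  # resume after this page's final snapshot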

Firebase snapshot prints optional

When I run a snapshot read from the Firebase database below, it returns
Optional(498895446)
when I only want it to return
498895446
as an Int. I have tried toInt(), but that is not working; I get an error. How can I get rid of this optional?
let ref = FIRDatabase.database().reference().child("Users + infomation").child(currentuser).child("timeStamp ")
ref.observeSingleEventOfType(.Value, withBlock: { (snapShot) in
    let val = snapShot.value
    if snapShot.exists() {
        print("\(val)")
    } else if snapShot.exists() == false {
        print("snappyaintexist")
    }
})
Try:
let ref = FIRDatabase.database().reference().child("Users + infomation").child(currentuser).child("timeStamp ")
ref.observeSingleEventOfType(.Value, withBlock: { (snapShot) in
    if let val = snapShot.value as? Int {
        // val is already unwrapped by `if let`, so no force-unwrap is needed
        print("\(val)")
    } else {
        print("snappyaintexist")
    }
})
