Get HTTP response body as string (BubbleWrap for RubyMotion)

Using RubyMotion (for the first time!), I want to use Twitter's search API to retrieve some recent tweets for some users, so I have put together the class below.
The value of tweets is always an empty array. I suspect that BW::HTTP.get(url) spawns its own thread, which is causing the issue.
Really, I just want twitter_search_results to return response.body.to_str, but I am not sure how to do this.
How do I use RubyMotion (or BubbleWrap) to put an array of Tweet objects into my UIViewController?
class TweetsController
  def initialize
    @twitter_accounts = %w(dhh google)
    @tweets = []
  end

  def tweets
    twitter_search_results
    puts @tweets.count
    @tweets
  end

  def create_tweets(response)
    BW::JSON.parse(response)["results"].each do |result|
      @tweets << Tweet.new(result)
    end
  end

  def twitter_search_results
    query = @twitter_accounts.map { |account| "from:#{account}" }.join(" OR ")
    url = "http://search.twitter.com/search.json?q=#{query}"
    BW::HTTP.get(url) do |response|
      create_tweets(response.body.to_str)
    end
  end
end
class TwitterViewController < UIViewController
  def viewDidLoad
    super
    self.view.backgroundColor = UIColor.blueColor
    @table = UITableView.alloc.initWithFrame(self.view.bounds)
    self.view.addSubview(@table)
    @table.dataSource = self
    @tweets_controller = TweetsController.new
  end

  def initWithNibName(name, bundle: bundle)
    super
    self.tabBarItem = UITabBarItem.alloc.initWithTitle(
      "Twitter",
      image: UIImage.imageNamed('twitter.png'),
      tag: 1)
    self
  end

  def tableView(tableView, numberOfRowsInSection: section)
    @tweets_controller.tweets.length
  end

  def tableView(tableView, cellForRowAtIndexPath: indexPath)
    @reuse_id = "Tweet"
    cell = UITableViewCell.alloc.initWithStyle(UITableViewCellStyleDefault, reuseIdentifier: @reuse_id)
    cell.textLabel.text = @tweets_controller.tweets[indexPath.row].text
    return cell
  end
end
class Tweet
  attr_reader :created_at, :from_user, :text

  def initialize(tweet_result)
    @created_at = tweet_result["created_at"]
    @from_user = tweet_result["from_user"]
    @text = tweet_result["text"]
  end
end

Full controller code below. I've also put the project on GitHub.
class TweetsController
  def initialize
    @twitter_accounts = %w(dhh google)
    @tweets = []
    create_tweets
  end

  def tweets
    @tweets
  end

  def create_tweets
    json_data = twitter_search_results.dataUsingEncoding(NSUTF8StringEncoding)
    e = Pointer.new(:object)
    dict = NSJSONSerialization.JSONObjectWithData(json_data, options: 0, error: e)
    dict["results"].each do |result|
      p result.class
      p result
      @tweets << Tweet.new(result)
    end
  end

  def twitter_search_results
    query = @twitter_accounts.map { |account| "from:#{account}" }.join(" OR ")
    url_string = "http://search.twitter.com/search.json?q=#{query}"
    url_string_escaped = url_string.stringByAddingPercentEscapesUsingEncoding(NSUTF8StringEncoding)
    url = NSURL.URLWithString(url_string_escaped)
    request = NSURLRequest.requestWithURL(url)
    response = nil
    error = nil
    data = NSURLConnection.sendSynchronousRequest(request, returningResponse: response, error: error)
    raise "BOOM!" unless (data.length > 0 && error.nil?)
    json = NSString.alloc.initWithData(data, encoding: NSUTF8StringEncoding)
  end
end

The issue here is asynchronicity. You're almost there, I think, but the create_tweets method has not been called by the time puts @tweets runs. In this case, I would recommend using a notification, because I think they are good ;-)

TweetsReady = 'TweetsReady' # constants are nice
NSNotificationCenter.defaultCenter.postNotificationName(TweetsReady, object: @tweets)

In your controller, register for this notification in `viewWillAppear` and unregister in `viewWillDisappear`:

NSNotificationCenter.defaultCenter.addObserver(self, selector: 'tweets_ready:', name: TweetsReady, object: nil) # object: nil means 'register for all events, not just ones associated with object'
# ...
NSNotificationCenter.defaultCenter.removeObserver(self, name: TweetsReady, object: nil)

and your tweets_ready method should implement your UI changes:

def tweets_ready(notification)
  @table.reloadData
end

Related

Adding context from django_tables2 library to admin panel

I'm trying to use django_tables2 in Django admin panel.
Part of the library that I'm interested in is:
'''
class SingleTableMixin(TableMixinBase):
    table_class = None
    table_data = None

    def get_table_class(self):
        if self.table_class:
            return self.table_class
        if self.model:
            return tables.table_factory(self.model)
        raise ImproperlyConfigured(
            "You must either specify {0}.table_class or {0}.model".format(
                type(self).__name__
            )
        )

    def get_table(self, **kwargs):
        table_class = self.get_table_class()
        table = table_class(data=self.get_table_data(), **kwargs)
        return RequestConfig(
            self.request, paginate=self.get_table_pagination(table)
        ).configure(table)

    def get_table_data(self):
        if self.table_data is not None:
            return self.table_data
        elif hasattr(self, "object_list"):
            return self.object_list
        elif hasattr(self, "get_queryset"):
            return self.get_queryset()
        klass = type(self).__name__
        raise ImproperlyConfigured(
            "Table data was not specified. Define {}.table_data".format(klass)
        )

    def get_table_kwargs(self):
        return {}

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        table = self.get_table(**self.get_table_kwargs())
        context[self.get_context_table_name(table)] = table
        return context
'''
The problem is that this context is not sent to the admin panel. Can I somehow get the context from the last method into my admin view?
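For reference, outside the admin this mixin is normally combined with a generic view, and get_context_data is what delivers the table to the template. A minimal sketch of that usual wiring (the Person model, its fields, and the template name are assumptions, not from the question):

import django_tables2 as tables
from django.views.generic import ListView

from myapp.models import Person  # hypothetical model


class PersonTable(tables.Table):
    class Meta:
        model = Person
        fields = ("name",)


class PersonListView(tables.SingleTableMixin, ListView):
    # The mixin's get_context_data() runs as part of the normal view
    # cycle and puts the table into the context under the name "table".
    model = Person
    table_class = PersonTable
    template_name = "people.html"  # template calls {% render_table table %}

The admin change list builds its own context in ModelAdmin.changelist_view and never calls this mixin's get_context_data, which is why the table does not reach the admin template on its own.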

How to stop the Crawler

I am trying to write a crawler that goes to a website and searches for a list of keywords, with a max depth of 2. The scraper is supposed to stop once any of the keywords appears on any page; the problem I am facing right now is that the crawler does not stop when it first sees any of the keywords.
This happens even after trying an early return, a break, CloseSpider, and even Python exit commands.
My class of the Crawler:
class WebsiteSpider(CrawlSpider):
    name = "webcrawler"
    allowed_domains = ["www.roomtoread.org"]
    start_urls = ["https://" + "www.roomtoread.org"]
    rules = [Rule(LinkExtractor(), follow=True, callback="check_buzzwords")]

    crawl_count = 0
    words_found = 0

    def check_buzzwords(self, response):
        self.__class__.crawl_count += 1
        crawl_count = self.__class__.crawl_count
        wordlist = [
            "sfdc",
            "pardot",
            "Web-to-Lead",
            "salesforce",
        ]
        url = response.url
        contenttype = response.headers.get("content-type", "").decode('utf-8').lower()
        data = response.body.decode('utf-8')
        for word in wordlist:
            substrings = find_all_substrings(data, word)
            for pos in substrings:
                ok = False
                if not ok:
                    if self.__class__.words_found == 0:
                        self.__class__.words_found += 1
                        print(word + "," + url + ";")
                        # STOP!  <- this is where I want the crawler to stop
        return Item()

    def _requests_to_follow(self, response):
        if getattr(response, "encoding", None) != None:
            return CrawlSpider._requests_to_follow(self, response)
        else:
            return []
I want it to stop execution when "if not ok:" is true.
When I want to stop a spider, I usually raise the exception scrapy.exceptions.CloseSpider(reason='cancelled') from the Scrapy docs.
The example there shows how you can use it:

if 'Bandwidth exceeded' in response.body:
    raise CloseSpider('bandwidth_exceeded')

In your case, something like:

if not ok:
    raise CloseSpider('keyword_found')
Or is that what you meant with "CloseSpider Commands", and you already tried it?
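Put together with the question's code, the callback would look roughly like this (a sketch: find_all_substrings and Item are assumed from the question, and wordlist is hoisted to a class attribute here):

from scrapy.exceptions import CloseSpider

class WebsiteSpider(CrawlSpider):
    # name, allowed_domains, start_urls, rules as in the question
    wordlist = ["sfdc", "pardot", "Web-to-Lead", "salesforce"]

    def check_buzzwords(self, response):
        data = response.body.decode("utf-8")
        for word in self.wordlist:
            if find_all_substrings(data, word):  # helper from the question
                print(word + "," + response.url + ";")
                # Raising CloseSpider stops scheduling new requests;
                # requests already in the downloader still finish, so a
                # few extra pages may be fetched before shutdown.
                raise CloseSpider("keyword_found")
        return Item()

One caveat worth knowing: raising CloseSpider does not kill the crawl instantly. Scrapy drains the requests that are already in flight before closing, which can look like the spider "did not stop".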

When to return an Item if I don't know when the spider will finish?

So my spider takes in a list of websites and crawls each one via start_requests, which yields a Request with the item passed in meta.
The spider then explores all the internal links of a single website and collects all the external links into the item. The problem is that I don't know when the spider finishes crawling all the internal links, so I can't yield the item.
class WebsiteSpider(scrapy.Spider):
    name = "web"

    def start_requests(self):
        filename = "websites.csv"
        requests = []
        try:
            with open(filename, 'r') as csv_file:
                reader = csv.reader(csv_file)
                header = next(reader)
                for row in reader:
                    seed_url = row[1].strip()
                    item = Links(base_url=seed_url, on_list=[])
                    request = Request(seed_url, callback=self.parse_seed)
                    request.meta['item'] = item
                    requests.append(request)
            return requests
        except IOError:
            raise scrapy.exceptions.CloseSpider("A list of websites are needed")

    def parse_seed(self, response):
        item = response.meta['item']
        netloc = urlparse(item['base_url']).netloc
        external_le = LinkExtractor(deny_domains=netloc)
        external_links = external_le.extract_links(response)
        for external_link in external_links:
            item['on_list'].append(external_link)
        internal_le = LinkExtractor(allow_domains=netloc)
        internal_links = internal_le.extract_links(response)
        for internal_link in internal_links:
            request = Request(internal_link.url, callback=self.parse_seed)
            request.meta['item'] = item
            yield request
The start_requests method needs to yield Request objects. You don't need to return a list of requests; you can yield each Request as soon as it is ready. This works because Scrapy requests are asynchronous.
The same goes for items: just yield an item whenever you think it is ready. For your case, you could check whether there are no more internal_links and yield the item then, or you could yield as many items as you want and afterwards check which one was the last (or the one with the most data):
class WebsiteSpider(scrapy.Spider):
    name = "web"

    def start_requests(self):
        filename = "websites.csv"
        try:
            with open(filename, 'r') as csv_file:
                reader = csv.reader(csv_file)
                header = next(reader)
                for row in reader:
                    seed_url = row[1].strip()
                    item = Links(base_url=seed_url, on_list=[])
                    yield Request(seed_url, callback=self.parse_seed, meta={'item': item})
        except IOError:
            raise scrapy.exceptions.CloseSpider("A list of websites are needed")

    def parse_seed(self, response):
        item = response.meta['item']
        netloc = urlparse(item['base_url']).netloc
        external_le = LinkExtractor(deny_domains=netloc)
        external_links = external_le.extract_links(response)
        for external_link in external_links:
            item['on_list'].append(external_link)
        internal_le = LinkExtractor(allow_domains=netloc)
        internal_links = internal_le.extract_links(response)
        if internal_links:
            for internal_link in internal_links:
                request = Request(internal_link.url, callback=self.parse_seed)
                request.meta['item'] = item
                yield request
        else:
            yield item
Another thing you could do is create an extension that hooks into the spider_closed signal, so you can do whatever you need at the point you know the spider has ended.
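The same idea can also live on the spider itself instead of a separate extension. A rough sketch using Scrapy's signals API (accumulating items on the spider, e.g. in self.items, is an assumption here):

from scrapy import signals

class WebsiteSpider(scrapy.Spider):
    name = "web"

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        spider = super().from_crawler(crawler, *args, **kwargs)
        # Run spider_closed() once the crawl has fully finished.
        crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
        return spider

    def spider_closed(self, spider):
        # Every request has been processed by this point, so any items
        # accumulated on the spider (e.g. self.items) are complete and
        # safe to post-process or write out.
        ...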

Flask-WTF: Queries of FormFields in FieldList are none after validate_on_submit

I'm trying to generate dynamic forms using Flask-WTF to create a new product based on some templates. A product will have a list of required key-value pairs based on its type, as well as a list of parts required to build it. The current relevant code looks as follows:
forms.py:
class PartSelectionForm(Form):
    selected_part = QuerySelectField('Part', get_label='serial', allow_blank=True)
    part_type = StringField('Type')
    slot = IntegerField('Slot')
    required = BooleanField('Required')

    def __init__(self, csrf_enabled=False, *args, **kwargs):
        super(PartSelectionForm, self).__init__(csrf_enabled=False, *args, **kwargs)


class NewProductForm(Form):
    serial = StringField('Serial', default='', validators=[DataRequired()])
    notes = TextAreaField('Notes', default='')
    parts = FieldList(FormField(PartSelectionForm))
views.py:
@app.route('/products/new/<prodmodel>', methods=['GET', 'POST'])
@login_required
def new_product(prodmodel):
    try:
        model = db.session.query(ProdModel).filter(ProdModel.id == prodmodel).one()
    except NoResultFound, e:
        flash('No products of model type -' + prodmodel + '- found.', 'error')
        return redirect(url_for('index'))

    keys = db.session.query(ProdTypeTemplate.prod_info_key)\
        .filter(ProdTypeTemplate.prod_type_id == model.prod_type_id)\
        .order_by(ProdTypeTemplate.prod_info_key).all()
    parts_needed = db.session.query(ProdModelTemplate)\
        .filter(ProdModelTemplate.prod_model_id == prodmodel)\
        .order_by(ProdModelTemplate.part_type_id, ProdModelTemplate.slot).all()

    class F(forms.NewProductForm):
        pass

    for key in keys:
        if key.prod_info_key in ['shipped_os', 'factory_os']:
            setattr(F, key.prod_info_key, forms.QuerySelectField(key.prod_info_key, get_label='version'))
        else:
            setattr(F, key.prod_info_key, forms.StringField(key.prod_info_key, validators=[forms.DataRequired()]))

    form = F(request.form)

    if request.method == 'GET':
        for part in parts_needed:
            entry = form.parts.append_entry(forms.PartSelectionForm())
            entry.part_type.data = part.part_type_id
            entry.slot.data = part.slot
            entry.required.data = part.required
            entry.selected_part.query = db.session.query(Part).join(PartModel)\
                .filter(PartModel.part_type_id == part.part_type_id, Part.status == 'inventory')

    if 'shipped_os' in form:
        form.shipped_os.query = db.session.query(OSVersion).order_by(OSVersion.version)
    if 'factory_os' in form:
        form.factory_os.query = db.session.query(OSVersion).order_by(OSVersion.version)

    if form.validate_on_submit():
        ...
Everything works as expected on a GET request, but on validate_on_submit I get errors: all of the queries and query_factories for the selected_part QuerySelectFields in the list of PartSelectionForms are None, causing errors either directly in the WTForms validation code or when Jinja2 attempts to re-render the QuerySelectFields. I'm not sure why this happens on the POST when everything appears to be correct for the GET.
I realized that although I set the required queries on a GET, I wasn't doing it for any PartSelectionForm selected_part entries on the POST. Since I already intended part_type, slot, and required to be hidden form fields, I added the following immediately before validate_on_submit, and everything works correctly:
for entry in form.parts:
    entry.selected_part.query = db.session.query(Part).join(PartModel)\
        .filter(PartModel.part_type_id == entry.part_type.data, Part.status == 'inventory')
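In context, the loop sits right before validation in the view above (a sketch of the placement):

# Re-attach queries on every request: QuerySelectField's query is not
# carried in the POSTed form data, so it must be rebuilt before the
# fields can validate or re-render.
for entry in form.parts:
    entry.selected_part.query = db.session.query(Part).join(PartModel)\
        .filter(PartModel.part_type_id == entry.part_type.data,
                Part.status == 'inventory')

if form.validate_on_submit():
    # safe now: every selected_part has a query again
    ...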

GTK TreeView with ListStore doesn't display anything (Python and SQLite3)

I use GTK and Python to develop an application.
I want to load TreeView elements (one column) from a SQLite3 database, but something goes wrong (without any error)!
Here is the whole code:
#!/usr/bin/python
import sys
import sqlite3 as sqlite
from gi.repository import Gtk
from gi.repository import Notify


def notify(notifer, text, notificationtype=""):
    Notify.init("Application")
    notification = Notify.Notification.new(notifer, text, notificationtype)
    notification.show()


def get_object(gtkname):
    builder = Gtk.Builder()
    builder.add_from_file("main.ui")
    return builder.get_object(gtkname)


def base_connect(basefile):
    return sqlite.connect(basefile)


class Handler:
    def main_destroy(self, *args):
        Gtk.main_quit(*args)

    def hardwaretool_clicked(self, widget):
        baselist = get_object("subjectlist")
        baselist.clear()
        base = base_connect("subjectbase")
        with base:
            cur = base.cursor()
            cur.execute("SELECT * FROM sub")
            while True:
                row = cur.fetchone()
                if row == None:
                    break
                iter = baselist.append()
                print "row ", row[0]
                baselist.set(iter, 0, row[0])
            cur.close()

    def gamestool_clicked(self, widget):
        print("gamestool clicked!!!!! =)")

    def appstool_clicked(self, widget):
        print("appstool clicked!!!!! =)")

    def fixtool_clicked(self, widget):
        notify("Charmix", "Fix Applied", "dialog-ok")

    def brokenfixtool_clicked(self, widget):
        notify("Charmix", "Broken Fix Report Sended", "dialog-error")

    def sendfixtool_clicked(self, widget):
        notify("Charmix", "Fix Sended", "dialog-information")


class CharmixMain:
    def __init__(self):
        builder = Gtk.Builder()
        builder.add_from_file("main.ui")
        self.window = builder.get_object("main")
        self.subject = builder.get_object("subjectlist")
        self.problem = builder.get_object("problemlist")
        self.toolbar = builder.get_object("toolbar")
        self.hardwaretool = builder.get_object("hardwaretool")
        self.gamestool = builder.get_object("gamestool")
        self.appstool = builder.get_object("appstool")
        self.fixtool = builder.get_object("fixtool")
        self.brokenfixtool = builder.get_object("brokenfixtool")
        self.sendfixtool = builder.get_object("sendfixtool")
        builder.connect_signals(Handler())
        context = self.toolbar.get_style_context()
        context.add_class(Gtk.STYLE_CLASS_PRIMARY_TOOLBAR)


if __name__ == "__main__":
    Charmix = CharmixMain()
    Charmix.window.show()
    Gtk.main()
I'm interested in this part (not working normally):
def hardwaretool_clicked(self, widget):
    baselist = get_object("subjectlist")
    baselist.clear()
    base = base_connect("subjectbase")
    with base:
        cur = base.cursor()
        cur.execute("SELECT * FROM sub")
        while True:
            row = cur.fetchone()
            if row == None:
                break
            iter = baselist.append()
            print "row ", row[0]
            baselist.set(iter, 0, row[0])
        cur.close()
The TreeView (subjecttree) doesn't display anything, but print "row ", row[0] works fine and displays all the strings.
Please help me.
Maybe I need to repaint the TreeView or something like that? Do you know how I can fix this?
The problem is in your get_object method.
When you do:
builder = Gtk.Builder()
builder.add_from_file("main.ui")
you're actually creating a new window; even if you are using the same ui file, you are creating a completely different widget.
One way to get around the problem of accessing the widgets you need to process in your handler is to pass them as parameters of the constructor:
class Handler(object):
    def __init__(self, widget1, widget2):
        self.widget1 = widget1
        self.widget2 = widget2
    ...
You can use those widgets in the handler's methods afterwards.
Another, more 'decoupled' way of accessing the widgets is to add the object you want to use as the last parameter of the connect method when you're connecting signals; the drawback is that you have to do this manually (since Glade doesn't provide this possibility):
self.widget.connect('some-signal', handler.handler_method, object_to_use)
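Applied to the question's code, the first approach would look something like this (a sketch: it hands the handler the "subjectlist" ListStore from the one Builder that also built the window, so the visible TreeView sees every append):

class Handler:
    def __init__(self, baselist):
        # The ListStore from the same Builder that built the window,
        # not a fresh copy loaded from main.ui.
        self.baselist = baselist

    def hardwaretool_clicked(self, widget):
        self.baselist.clear()
        base = base_connect("subjectbase")
        with base:
            cur = base.cursor()
            cur.execute("SELECT * FROM sub")
            for row in cur:
                self.baselist.set(self.baselist.append(), 0, row[0])

and in CharmixMain.__init__, reuse the existing builder instead of get_object():

builder.connect_signals(Handler(builder.get_object("subjectlist")))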
