Why am I getting NameError: Not Defined after adding to urls.py? - django-urls

I am getting this error:
NameError at /storefront/
name 'storefront' is not defined
My urls.py looks like this:
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.views.generic import TemplateView
from accounts.views import home
from django.contrib import admin
urlpatterns = patterns('',
    url(r"^$", home),
    url(r"^storefront/", storefront, name="storefront"),
)
My views.py looks like this:
def storefront(request):
    latest_entries = Entry.objects.filter()
    context = {'latest_entries': latest_entries}
    return render(request, 'storefront.html', context)
Is the error in the urls or the views?
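For reference, the NameError comes from urls.py: storefront is used there but never imported. A minimal sketch of the likely fix, assuming the storefront view lives in the same accounts/views.py module as home (the post does not say where it is defined):

# Assumption: storefront is defined next to home in accounts/views.py
from accounts.views import home, storefront

urlpatterns = patterns('',
    url(r"^$", home),
    url(r"^storefront/", storefront, name="storefront"),
)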

Related

Amazon AirFlow 1.10.12: No module named 'operators'

I am creating a plugin and DAG structure for Amazon Airflow 1.10.12. I am following the documentation:
dags/
    aws_from_redshift_to_s3.py
plugins/
    __init__.py
    from_redshift_to_s3_plugin.py
    operators/
        __init__.py
        aws_from_redshift_to_s3_operator.py
aws_from_redshift_to_s3_operator.py:
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.contrib.hooks.aws_hook import AwsHook
class FromRedshiftToS3TransferOperator(BaseOperator):
    pass
from_redshift_to_s3_plugin.py:
from airflow.plugins_manager import AirflowPlugin
from operators.aws_from_redshift_to_s3_operator import FromRedshiftToS3TransferOperator
class FromRedShiftToS3Plugin(AirflowPlugin):
    name = 'from_redshift_to_s3_plugin'
    operators = [FromRedshiftToS3TransferOperator]
In the DAG itself I import it like this:
from operators.from_redshift_to_s3_plugin import FromRedshiftToS3TransferOperator
When I try to activate the DAG in Amazon Airflow 1.10.12, I get the error: No module named 'operators'
https://airflow.apache.org/docs/apache-airflow/1.10.12/howto/custom-operator.html
As mentioned in that documentation, you no longer need to import from an operators package. Instead, try importing it like this:
from aws_from_redshift_to_s3_operator import FromRedshiftToS3TransferOperator
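A minimal sketch of how the DAG file might then look, assuming the operator module ends up at the top level of the plugins directory on sys.path (as with an MWAA plugins.zip); the DAG id, schedule, and task id below are hypothetical:

# aws_from_redshift_to_s3.py - minimal sketch; dag_id, schedule and task_id are hypothetical
from datetime import datetime

from airflow import DAG
from aws_from_redshift_to_s3_operator import FromRedshiftToS3TransferOperator

with DAG(
    dag_id="aws_from_redshift_to_s3",
    start_date=datetime(2021, 1, 1),
    schedule_interval=None,
) as dag:
    unload_to_s3 = FromRedshiftToS3TransferOperator(task_id="unload_to_s3")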

Project does not compile after migrating from Vaadin 7 to Vaadin 8

The current version of Vaadin is 7.3.6.
Here is some of my code:
import com.vaadin.data.Property;
import com.vaadin.data.Property.ValueChangeEvent;
import com.vaadin.ui.NativeSelect;
import com.vaadin.ui.TextField;
import com.vaadin.ui.UI;
import com.vaadin.ui.VerticalLayout;
private NativeSelect currencySelector;

private void initCurrencySelector(String providerId) {
    currencySelector = new NativeSelect();
    List<String> selectCurrencyList;
    currencySelector.removeAllItems();
}
This code compiles successfully.
But after I upgrade to Vaadin 8.12.0, this code no longer compiles.
The errors are on these lines:
import com.vaadin.data.Property;
import com.vaadin.data.Property.ValueChangeEvent;
import com.vaadin.event.FieldEvents.TextChangeEvent;
import com.vaadin.event.FieldEvents.TextChangeListener;
and in this line:
currencySelector.removeAllItems();
The new imports should be:
import com.vaadin.data.HasValue.ValueChangeEvent;
import com.vaadin.event.FieldEvents;
TextChangeEvent and TextChangeListener were probably replaced by HasValue.ValueChangeEvent and HasValue.ValueChangeListener.
currencySelector.removeAllItems(); should be
currencySelector.setDataProvider(new ListDataProvider(new ArrayList()));
A list of incompatible changes can be found here: https://vaadin.com/download/prerelease/8.0/8.0.0/8.0.0.beta1/release-notes.html#incompatible

Pact: Error when trying to setup mock provider

I'm trying to write my first pact-python test using pytest. Could someone please tell me what's wrong with my code?
import unittest
import requests
import json
import pytest
import atexit
from pact import Consumer, Provider

pact = Consumer('Consumer').has_pact_with(Provider('Provider'), host_name='mockservice', port=8080)
pact.start_service()
atexit.register(pact.stop_service)


class InterviewDetails(unittest.TestCase):
    def test_candidate_report_api(self):
        candidate_report_payload = {}
        resp = requests.post("http://localhost:1234/users/", data=json.dumps(candidate_report_payload))
        response = json.loads(resp.text)
        return response

    @pytest.mark.health1
    def test_candidate_report(self):
        expected = {}
        (pact.given('Comment')
            .upon_receiving('comment')
            .with_request(method='POST', path="http://localhost:1234/users/", headers={})
            .will_respond_with(200, body=expected))
        with pact:
            pact.setup()
            result = self.test_candidate_report_api()
            self.assertEqual(result, expected)
            pact.verify()
The error from the stack trace:
AttributeError: module 'pact' has no attribute 'Like'
Can you please confirm that you're using pact-python from https://github.com/pact-foundation/pact-python/ (and not pactman, a project that is not maintained by the Pact Foundation)?
It might be related to the way you have set up your test.
Here is an example project you can use for reference: https://github.com/pactflow/example-consumer-python/
Relevant test code:
"""pact test for product service client"""
import json
import logging
import os

import requests
from requests.auth import HTTPBasicAuth

import pytest

from pact import Consumer, Like, Provider, Term, Format
from src.consumer import ProductConsumer

log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

print(Format().__dict__)

PACT_MOCK_HOST = 'localhost'
PACT_MOCK_PORT = 1234
PACT_DIR = os.path.dirname(os.path.realpath(__file__))


@pytest.fixture
def consumer():
    return ProductConsumer(
        'http://{host}:{port}'
        .format(host=PACT_MOCK_HOST, port=PACT_MOCK_PORT)
    )


@pytest.fixture(scope='session')
def pact(request):
    pact = Consumer('pactflow-example-consumer-python').has_pact_with(
        Provider('pactflow-example-provider-python'), host_name=PACT_MOCK_HOST, port=PACT_MOCK_PORT,
        pact_dir="./pacts", log_dir="./logs")

    try:
        print('start service')
        pact.start_service()
        yield pact
    finally:
        print('stop service')
        pact.stop_service()


def test_get_product(pact, consumer):
    expected = {
        'id': "27",
        'name': 'Margharita',
        'type': 'Pizza'
    }

    (pact
     .given('a product with ID 10 exists')
     .upon_receiving('a request to get a product')
     .with_request('GET', '/product/10')
     .will_respond_with(200, body=Like(expected)))

    with pact:
        user = consumer.get_product('10')
        assert user.name == 'Margharita'
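For context, ProductConsumer is the client under test imported from src/consumer.py in that repo. A rough, hypothetical sketch of what such a client looks like (class and field names are assumptions, not the repo's exact code):

import requests


class Product:
    def __init__(self, id, name, type):
        self.id = id
        self.name = name
        self.type = type


class ProductConsumer:
    def __init__(self, base_uri):
        self.base_uri = base_uri

    def get_product(self, product_id):
        # Issues the GET that the pact interaction above expects
        uri = '{}/product/{}'.format(self.base_uri, product_id)
        response = requests.get(uri)
        response.raise_for_status()
        data = response.json()
        return Product(data['id'], data['name'], data['type'])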

Python import error and can't find some files

I have been getting errors and I do not know how to go about fixing them. It's telling me I have an import error:
from ..items import QuotetutorialItem
ImportError: attempted relative import with no known parent package.
import scrapy
from scrapy.http import FormRequest
from scrapy.utils.response import open_in_browser
from ..items import QuotetutorialItem


class Quotespider(scrapy.Spider):
    name = 'quotes'
    start_urls = [
        'http://quotes.toscrape.com/login'
    ]

    def parse(self, response):
        token = response.css('form input::attr(value)').extract_first()
        return FormRequest.from_response(response, formdata={
            'csrf_token': token,
            'username': 'abc',
            'password': '123',
        }, callback=self.start_scraping)

    def start_scraping(self, response):
        open_in_browser(response)
        items = QuotetutorialItem()
        all_div_quotes = response.css('div.quote')
        for quotes in all_div_quotes:
            title = quotes.css('span.text::text').extract()
            author = quotes.css('.author::text').extract()
            tag = quotes.css('.tag::text').extract()
            items['title'] = title
            items['author'] = author
            items['tag'] = tag
            yield items
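This error usually appears when the spider module is executed directly (for example with python on the spider file) instead of through the Scrapy CLI, so Python has no parent package to resolve ..items against. Run it with scrapy crawl quotes, or switch to an absolute import. A minimal sketch, assuming the Scrapy project package is named quotetutorial (an assumption; use the folder that actually contains items.py):

# Either run the spider through Scrapy so the package context exists:
#   scrapy crawl quotes
# or replace the relative import with an absolute one.
# "quotetutorial" below is an assumed project package name.
from quotetutorial.items import QuotetutorialItem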

530 error when trying to open FTP directory

I want to use Scrapy to download files and navigate folders at ftp://ftp.co.palm-beach.fl.us/Building%20Permits/.
Here's my spider:
# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request


class LatestPermitsSpider(scrapy.Spider):
    name = "latest_permits"
    allowed_domains = ["ftp.co.palm-beach.fl.us"]
    handle_httpstatus_list = [404]

    ftpUser = "the_username"
    ftpPW = "the_password"
    permitFilesDir = "ftp://ftp.co.palm-beach.fl.us/Building%20Permits/"

    def start_requests(self):
        yield Request(
            url=self.permitFilesDir,
            meta={
                "ftp_user": self.ftpUser,
                "ftp_password": self.ftpPW
            }
        )

    def parse(self, response):
        print response.body
When I run scrapy crawl latest_permits, I get this error:
ConnectionLost: ('FTP connection lost', <twisted.python.failure.Failure twisted.protocols.ftp.CommandFailed: ['530 Sorry, no ANONYMOUS access allowed.']>)
Why does this error come up even when I supply the correct username and password?
Look at the Scrapy source code below:
https://github.com/scrapy/scrapy/blob/master/scrapy/core/downloader/handlers/ftp.py
The issue is not with your username or password. The issue is that Scrapy only supports downloading individual files over FTP; it does not support listing directories. The URL you are using is a directory URL.
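For contrast, fetching a single file over FTP does work with the stock handler. A minimal sketch (the file name below is hypothetical; the credentials use the same meta keys as in the question):

import scrapy
from scrapy.http import Request


class SinglePermitFileSpider(scrapy.Spider):
    name = "single_permit_file"

    def start_requests(self):
        # Points at one specific file (hypothetical name), not a directory
        yield Request(
            url="ftp://ftp.co.palm-beach.fl.us/Building%20Permits/example.csv",
            meta={"ftp_user": "the_username", "ftp_password": "the_password"},
        )

    def parse(self, response):
        # response.body holds the raw file contents
        self.logger.info("downloaded %d bytes", len(response.body))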
There is a possible workaround that uses a package named ftptree.
Add a handlers.py with the code below:
import json

from twisted.protocols.ftp import FTPFileListProtocol

from scrapy.http import Response
from scrapy.core.downloader.handlers.ftp import FTPDownloadHandler


class FtpListingHandler(FTPDownloadHandler):
    def gotClient(self, client, request, filepath):
        self.client = client
        protocol = FTPFileListProtocol()
        return client.list(filepath, protocol).addCallbacks(
            callback=self._build_response, callbackArgs=(request, protocol),
            errback=self._failed, errbackArgs=(request,))

    def _build_response(self, result, request, protocol):
        self.result = result
        body = json.dumps(protocol.files)
        return Response(url=request.url, status=200, body=body)
And then in your settings.py use:
DOWNLOAD_HANDLERS = {'ftp': 'crawlername.handlers.FtpListingHandler'}
A sample spider:
import os
import json
from urlparse import urlparse

from scrapy import Spider
from scrapy.http.request import Request

from ftptree_crawler.items import FtpTreeLeaf


class AnonFtpRequest(Request):
    anon_meta = {'ftp_user': 'anonymous',
                 'ftp_password': 'laserson@cloudera.com'}

    def __init__(self, *args, **kwargs):
        super(AnonFtpRequest, self).__init__(*args, **kwargs)
        self.meta.update(self.anon_meta)


class FtpTreeSpider(Spider):
    name = 'ftptree'

    def __init__(self, config_file, *args, **kwargs):
        super(FtpTreeSpider, self).__init__(*args, **kwargs)
        with open(config_file, 'r') as ip:
            config = json.loads(ip.read())
        url = 'ftp://%s/%s' % (config['host'], config['root_path'])
        self.start_url = url
        self.site_id = config['id']

    def start_requests(self):
        yield AnonFtpRequest(self.start_url)

    def parse(self, response):
        url = urlparse(response.url)
        basepath = url.path
        files = json.loads(response.body)
        for f in files:
            if f['filetype'] == 'd':
                path = os.path.join(response.url, f['filename'])
                request = AnonFtpRequest(path)
                yield request
            if f['filetype'] == '-':
                path = os.path.join(basepath, f['filename'])
                result = FtpTreeLeaf(
                    filename=f['filename'], path=path, size=f['size'])
                yield result
Links to look at if you need further information:
https://github.com/laserson/ftptree/blob/master/ftptree_crawler/
https://gearheart.io/blog/crawling-ftp-server-with-scrapy/
