I searched for hours to figure out the reason for this error message:
I have a search entry that updates a listbox through a callback function, depending on my search:
Listbox:
self.name_search=tk.StringVar()
self.name_search.trace_add('write', self.my_callback)
self.e_name_search_text = tk.Label(search_f, text="Name: ").grid(row=0, column=0, padx=10, pady=5, sticky='E')
self.e_name_search = ttk.Entry(search_f, width = 35, textvariable=self.name_search)
self.e_name_search.grid(row=0, column=1, padx=5, pady=5, sticky='W')
self.lbox = tk.Listbox(search_f, width=35, height=8)
self.lbox.bind("<Double-Button-1>", self.show_name_search)
self.lbox.bind('<Return>', self.show_name_search)
self.scrollbar = tk.Scrollbar(search_f)
self.lbox.grid(row=1, column=1, rowspan=3, padx=10, pady=1)
self.lbox.config(yscrollcommand = self.scrollbar.set)
self.scrollbar.grid(row=1, column=2, rowspan=3, padx=1, pady=1, sticky='ns')
self.scrollbar.config(command=self.lbox.yview)
So if I type my search, the listbox shows me a reduced list of values from my SQLite database that I am interested in. If I select one with a double click, another SQLite query updates my comboboxes.
If I select one I get this error:
Exception in Tkinter callback
Traceback (most recent call last):
File "C:\Python38-32\lib\tkinter\__init__.py", line 1883, in __call__
return self.func(*args)
File "D:\... name.py", line 337, in show_name_search
self.e_fax.current(0)
File "C:\Python38-32\lib\tkinter\ttk.py", line 717, in current
return self.tk.call(self._w, "current", newindex)
_tkinter.TclError: Index 0 out of range
Line 337 comes from another function:
def show_name_search(self, event):
    self.clear_field()
    widget = event.widget
    selection = widget.curselection()
    indName = widget.get(selection[0])
    print(indName)
    print("selktierter Wert: {}".format(indName))
    self.realName.set(indName)
    connection = sqlite3.connect(select_connect_db)
    print('Database connected.')
    with connection:
        cursor = connection.cursor()
        cursor.execute("SELECT number, type, prio, id, uniqueid FROM numbers WHERE realName=?;", (indName,))
        data = cursor.fetchall()
        print(data)
    for row in data:
        if row[1] == 'home':
            self.phone_home.append(row[0])
            print('HOME:', self.phone_home)
        if row[1] == 'mobile':
            self.mobile.append(row[0])
            print('Mobile:', self.mobile)
        if row[1] == 'work':
            self.business.append(row[0])
            print(row[0])
            print('WORK:', self.business)
        if row[1] == 'fax_work':
            self.fax.append(row[0])
            print(row[0])
            print('FAX_WORK:', self.fax)
        self.uid_name.set(row[4])
    if len(self.phone_home) != 0:
        self.e_phone['values'] = self.phone_home
        self.e_phone.current(0)
    if len(self.mobile) != 0:
        self.e_mobile['values'] = self.mobile
        self.e_mobile.current(0)
    if len(self.business) != 0:
        self.e_business['values'] = self.business  # Set the value to the new list
        self.e_business.current(0)  # Set the first item of the list as current item
    if len(self.business) != 0:
        self.e_fax['values'] = self.fax
        self.e_fax.current(0)  ### Line 337 - No entry for this value in my sqlite database
Any idea what I should search for?
So self.e_fax seems like a ttk.Combobox to me. Consider this code here:
import tkinter as tk
from tkinter import ttk
root = tk.Tk()
values = []
lb = ttk.Combobox(root,values=values)
lb.current(0)
lb.pack()
root.mainloop()
It throws the same error:
_tkinter.TclError: Index 0 out of range
The reason is that the list values is empty; insert any regular string into it and it works.
If you want to set a default value, make sure that there actually is a value.
import tkinter as tk
from tkinter import ttk
root = tk.Tk()
values = ['see']
lb = ttk.Combobox(root,values=values)
lb.current(0)
lb.pack()
root.mainloop()
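In the question's code, the defensive fix is therefore to call current(0) only when the corresponding list has entries. A minimal hedged sketch, reusing the names from the question (self.fax, self.e_fax):

    # Hedged sketch: only set a default when the values list is non-empty.
    if self.fax:                      # at least one fax number came back from the database
        self.e_fax['values'] = self.fax
        self.e_fax.current(0)         # select the first entry as the default
    else:
        self.e_fax.set('')            # no data: leave the combobox empty instead of raising

The same guard applies to the other comboboxes; note as well that the last if in show_name_search tests self.business where it presumably means self.fax.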
Thanks in advance for your help.
I'm currently running a web scraper - this is the first time I've ever done something like this. It pulls addresses from the URL and then matches each address to the user's input. This will be going into a chat bot, and I'm wondering how I can make it run on Google Cloud Functions. What's the process to do this, and is there a tutorial anywhere?
This is my code so far. There is a small items file too.
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from ..items import DataItem
from fuzzywuzzy import fuzz
from urllib.parse import urljoin
import scrapy


class AddressesSpider(scrapy.Spider):
    name = 'Addresses'
    allowed_domains = ['find-energy-certificate.service.gov.uk']
    postcode = "bh10+4ah"
    start_urls = ['https://find-energy-certificate.service.gov.uk/find-a-certificate/search-by-postcode?postcode=' + postcode]

##    def start_requests(self):
##        self.first = input("Please enter the address you would like to match: ")
##        yield scrapy.Request(url=self.start_urls[0], callback=self.parse)

    def parse(self, response):
        first = input("Please enter the address you would like to match: ")
        highest_ratios = []
        highest_item = None
        for row in response.xpath('//table[@class="govuk-table"]//tr'):
            address = row.xpath("normalize-space(.//a[@class='govuk-link']/text())").extract()[0].lower()
            address = address.rsplit(',', 2)[0]
            link = row.xpath('.//a[@class="govuk-link"]/@href').extract()
            details = row.xpath("normalize-space(.//td/following-sibling::td)").extract()
            ratio = fuzz.token_set_ratio(address, first)

            item = DataItem()
            item['link'] = link
            item['details'] = details
            item['address'] = address
            item['ratioresult'] = ratio

            if len(highest_ratios) < 3:
                highest_ratios.append(item)
            elif ratio > min(highest_ratios, key=lambda x: x['ratioresult'])['ratioresult']:
                highest_ratios.remove(min(highest_ratios, key=lambda x: x['ratioresult']))
                highest_ratios.append(item)

        highest_ratios_100 = [item for item in highest_ratios if item['ratioresult'] == 100]

        if highest_ratios_100:
            for item in highest_ratios_100:
                yield item
        else:
            yield max(highest_ratios, key=lambda x: x['ratioresult'])

        if len(highest_ratios_100) > 1:
            for i, item in enumerate(highest_ratios_100):
                print(f"{i+1}: {item['address']}")
            selected = int(input("Please select the correct address by entering the number corresponding to the address: ")) - 1
            selected_item = highest_ratios_100[selected]
        else:
            selected_item = highest_ratios_100[0] if highest_ratios_100 else max(highest_ratios, key=lambda x: x['ratioresult'])

        new_url = selected_item['link'][0]
        new_url = str(new_url)
        if new_url:
            base_url = 'https://find-energy-certificate.service.gov.uk'
            print(f'Base URL: {base_url}')
            print(f'New URL: {new_url}')
            new_url = urljoin(base_url, new_url)
            print(f'Combined URL: {new_url}')
            yield scrapy.Request(new_url, callback=self.parse_new_page)

    def parse_new_page(self, response):
        Postcode = response.xpath('normalize-space((//p[@class="epc-address govuk-body"]/text())[last()])').extract()
        Town = response.xpath('normalize-space((//p[@class="epc-address govuk-body"]/text())[last()-1])').extract()
        First = response.xpath(".//p[@class='epc-address govuk-body']").extract()
        Type = response.xpath('normalize-space(//dd[1]/text())').extract_first()
        Walls = response.xpath("//th[contains(text(), 'Wall')]/following-sibling::td[1]/text()").extract()
        Roof = response.xpath("//th[contains(text(), 'Roof')]/following-sibling::td[1]/text()").extract()
        Heating = response.xpath("//th[text()='Main heating']/following-sibling::td[1]/text()").extract_first()
        CurrentScore = response.xpath('//body[1]/div[2]/main[1]/div[1]/div[3]/div[3]/svg[1]/svg[1]/text[1]/text()').re_first("[0-9+]{1,2}")
        Maxscore = response.xpath('//body[1]/div[2]/main[1]/div[1]/div[3]/div[3]/svg[1]/svg[2]/text[1]/text()').re_first("[0-9+]{2}")
        Expiry = response.xpath('normalize-space(//b)').extract_first()
        FloorArea = response.xpath('//dt[contains(text(), "floor area")]/following-sibling::dd/text()').re_first("[0-9+]{2,3}")
        Steps = response.xpath("//h3[contains(text(),'Step')]/text()").extract()

        yield {
            'Postcode': Postcode,
            'Town': Town,
            'First': First,
            'Type': Type,
            'Walls': Walls,
            'Roof': Roof,
            'Heating': Heating,
            'CurrentScore': CurrentScore,
            'Maxscore': Maxscore,
            'Expiry': Expiry,
            'FloorArea': FloorArea,
            'Steps': Steps
        }
I've tried Googling and looking around, but I can't work out how to deploy this as a project to run on Google Cloud Functions - or can I just copy the code into the console somewhere?
You can try running your spider from a script. However, a better solution would be to wrap scrapy in its own child process.
For example:
from multiprocessing import Process, Queue

from ... import MySpider
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings


def my_cloud_function(event, context):

    def script(queue):
        try:
            settings = get_project_settings()

            settings.setdict({
                'LOG_LEVEL': 'ERROR',
                'LOG_ENABLED': True,
            })

            process = CrawlerProcess(settings)
            process.crawl(MySpider)
            process.start()
            queue.put(None)
        except Exception as e:
            queue.put(e)

    queue = Queue()

    # wrap the spider in a child process
    main_process = Process(target=script, args=(queue,))
    main_process.start()   # start the process
    main_process.join()    # block until the spider finishes

    result = queue.get()   # check the process did not return an error
    if result is not None:
        raise result

    return 'ok'
You can refer to this tutorial for more info.
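If the function should also hand the scraped data back to the chat bot rather than just run the crawl, one option is to have the spider write a feed file and read it back once the child process has finished. This is a hedged sketch, not part of the original answer: the FEEDS setting, the /tmp path and the spider import path are assumptions to adapt to your project.

    import json
    from multiprocessing import Process, Queue

    from scrapy.crawler import CrawlerProcess
    from scrapy.utils.project import get_project_settings

    from myproject.spiders.addresses import AddressesSpider  # hypothetical import path


    def run_spider(queue):
        """Run the spider in a child process and report success or failure via the queue."""
        try:
            settings = get_project_settings()
            # FEEDS is a standard Scrapy setting; /tmp is the writable directory in Cloud Functions.
            settings.set('FEEDS', {'/tmp/items.json': {'format': 'json'}})
            process = CrawlerProcess(settings)
            process.crawl(AddressesSpider)
            process.start()
            queue.put(None)
        except Exception as exc:
            queue.put(exc)


    def my_cloud_function(event, context):
        queue = Queue()
        child = Process(target=run_spider, args=(queue,))
        child.start()
        child.join()
        error = queue.get()
        if error is not None:
            raise error
        with open('/tmp/items.json') as fh:
            return json.dumps(json.load(fh))  # return the scraped items as the HTTP response body

Note that the interactive input() calls in the spider would also need to be replaced (for example with parameters taken from the request) before it can run unattended in a Cloud Function.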
I have 2 datastore models:
class KindA(ndb.Model):
    field_a1 = ndb.StringProperty()
    field_a2 = ndb.StringProperty()


class KindB(ndb.Model):
    field_b1 = ndb.StringProperty()
    field_b2 = ndb.StringProperty()
    key_to_kind_a = ndb.KeyProperty(KindA)
I want to query KindB and output it to a CSV file, but if an entity of KindB points to an entity in KindA, I want those fields to be present in the CSV as well.
If I were able to use ndb inside of a transform, I would set up my pipeline like this:
def format(element):  # element is an `entity_pb2` object of KindB
    try:
        obj_a_key_id = element.properties.get('key_to_kind_a', None).key_value.path[0]
    except:
        obj_a_key_id = None

    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<< HOW DO I DO THIS
    obj_a = ndb.Key(KindA, obj_a_key_id).get() if obj_a_key_id else None

    return ",".join([
        element.properties.get('field_b1', None).string_value,
        element.properties.get('field_b2', None).string_value,
        obj_a.properties.get('field_a1', None).string_value if obj_a else '',
        obj_a.properties.get('field_a2', None).string_value if obj_a else '',
    ])
def build_pipeline(project, start_date, end_date, export_path):
    query = query_pb2.Query()
    query.kind.add().name = 'KindB'
    filter_1 = datastore_helper.set_property_filter(query_pb2.Filter(), 'field_b1', PropertyFilter.GREATER_THAN, start_date)
    filter_2 = datastore_helper.set_property_filter(query_pb2.Filter(), 'field_b1', PropertyFilter.LESS_THAN, end_date)
    datastore_helper.set_composite_filter(query.filter, CompositeFilter.AND, filter_1, filter_2)

    p = beam.Pipeline(options=pipeline_options)
    _ = (p
         | 'read from datastore' >> ReadFromDatastore(project, query, None)
         | 'format' >> beam.Map(format)
         | 'write' >> apache_beam.io.WriteToText(
               file_path_prefix=export_path,
               file_name_suffix='.csv',
               header='field_b1,field_b2,field_a1,field_a2',
               num_shards=1)
         )
    return p
I suppose I could use ReadFromDatastore to query all entities of KindA and then use CoGroupByKey to merge them, but KindA has millions of records and that would be very inefficient.
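For reference, a rough hedged sketch of that CoGroupByKey approach (the key-extraction helpers and the two query objects are illustrative, not code from this post) could look like the block below; it joins the two kinds on KindA's key id, but it has to read every KindA entity first, which is exactly the inefficiency mentioned above.

    import apache_beam as beam


    def key_by_own_id(entity):
        # hypothetical helper: key each KindA entity by the numeric id of its own key
        return (entity.key.path[0].id, entity)


    def key_by_referenced_id(entity):
        # hypothetical helper: key each KindB entity by the id of the KindA entity it points to
        kv = entity.properties.get('key_to_kind_a', None)
        return (kv.key_value.path[0].id if kv else None, entity)


    def pair_rows(keyed_group):
        _, grouped = keyed_group
        kind_a_list = list(grouped['kind_a'])
        for kind_b_entity in grouped['kind_b']:
            # emit (KindB entity, KindA entity or None) pairs for a downstream format step
            yield (kind_b_entity, kind_a_list[0] if kind_a_list else None)


    # inside a pipeline, instead of the single ReadFromDatastore step:
    kind_a = p | 'read KindA' >> ReadFromDatastore(project, query_kind_a, None) | 'key A' >> beam.Map(key_by_own_id)
    kind_b = p | 'read KindB' >> ReadFromDatastore(project, query_kind_b, None) | 'key B' >> beam.Map(key_by_referenced_id)
    joined = ({'kind_a': kind_a, 'kind_b': kind_b}
              | 'join' >> beam.CoGroupByKey()
              | 'pair' >> beam.FlatMap(pair_rows))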
Per the recommendations in this answer: https://stackoverflow.com/a/49130224/4458510
I created the following utils, which were inspired by the source code of DatastoreWriteFn in apache_beam.io.gcp.datastore.v1.datastoreio and of write_mutations and fetch_entities in apache_beam.io.gcp.datastore.v1.helper.
import logging
import time
from socket import error as _socket_error

from apache_beam.metrics import Metrics
from apache_beam.transforms import DoFn, window
from apache_beam.utils import retry
from apache_beam.io.gcp.datastore.v1.adaptive_throttler import AdaptiveThrottler
from apache_beam.io.gcp.datastore.v1.helper import make_partition, retry_on_rpc_error, get_datastore
from apache_beam.io.gcp.datastore.v1.util import MovingSum
from apache_beam.utils.windowed_value import WindowedValue
from google.cloud.proto.datastore.v1 import datastore_pb2, query_pb2
from googledatastore.connection import Datastore, RPCError

_WRITE_BATCH_INITIAL_SIZE = 200
_WRITE_BATCH_MAX_SIZE = 500
_WRITE_BATCH_MIN_SIZE = 10
_WRITE_BATCH_TARGET_LATENCY_MS = 5000


def _fetch_keys(project_id, keys, datastore, throttler, rpc_stats_callback=None, throttle_delay=1):
    req = datastore_pb2.LookupRequest()
    req.project_id = project_id
    for key in keys:
        req.keys.add().CopyFrom(key)

    @retry.with_exponential_backoff(num_retries=5, retry_filter=retry_on_rpc_error)
    def run(request):
        # Client-side throttling.
        while throttler.throttle_request(time.time() * 1000):
            logging.info("Delaying request for %ds due to previous failures", throttle_delay)
            time.sleep(throttle_delay)
            if rpc_stats_callback:
                rpc_stats_callback(throttled_secs=throttle_delay)
        try:
            start_time = time.time()
            response = datastore.lookup(request)
            end_time = time.time()
            if rpc_stats_callback:
                rpc_stats_callback(successes=1)
            throttler.successful_request(start_time * 1000)
            commit_time_ms = int((end_time - start_time) * 1000)
            return response, commit_time_ms
        except (RPCError, _socket_error):
            if rpc_stats_callback:
                rpc_stats_callback(errors=1)
            raise

    return run(req)


# Copied from _DynamicBatchSizer in apache_beam.io.gcp.datastore.v1.datastoreio
class _DynamicBatchSizer(object):
    """Determines request sizes for future Datastore RPCS."""

    def __init__(self):
        self._commit_time_per_entity_ms = MovingSum(window_ms=120000, bucket_ms=10000)

    def get_batch_size(self, now):
        """Returns the recommended size for datastore RPCs at this time."""
        if not self._commit_time_per_entity_ms.has_data(now):
            return _WRITE_BATCH_INITIAL_SIZE
        recent_mean_latency_ms = (self._commit_time_per_entity_ms.sum(now) / self._commit_time_per_entity_ms.count(now))
        return max(_WRITE_BATCH_MIN_SIZE,
                   min(_WRITE_BATCH_MAX_SIZE,
                       _WRITE_BATCH_TARGET_LATENCY_MS / max(recent_mean_latency_ms, 1)))

    def report_latency(self, now, latency_ms, num_mutations):
        """Reports the latency of an RPC to Datastore.

        Args:
          now: double, completion time of the RPC as seconds since the epoch.
          latency_ms: double, the observed latency in milliseconds for this RPC.
          num_mutations: int, number of mutations contained in the RPC.
        """
        self._commit_time_per_entity_ms.add(now, latency_ms / num_mutations)


class LookupKeysFn(DoFn):
    """A `DoFn` that looks up keys in the Datastore."""

    def __init__(self, project_id, fixed_batch_size=None):
        self._project_id = project_id
        self._datastore = None
        self._fixed_batch_size = fixed_batch_size
        self._rpc_successes = Metrics.counter(self.__class__, "datastoreRpcSuccesses")
        self._rpc_errors = Metrics.counter(self.__class__, "datastoreRpcErrors")
        self._throttled_secs = Metrics.counter(self.__class__, "cumulativeThrottlingSeconds")
        self._throttler = AdaptiveThrottler(window_ms=120000, bucket_ms=1000, overload_ratio=1.25)
        self._elements = []
        self._batch_sizer = None
        self._target_batch_size = None

    def _update_rpc_stats(self, successes=0, errors=0, throttled_secs=0):
        """Callback function, called by _fetch_keys()"""
        self._rpc_successes.inc(successes)
        self._rpc_errors.inc(errors)
        self._throttled_secs.inc(throttled_secs)

    def start_bundle(self):
        """(re)initialize: connection with datastore, _DynamicBatchSizer obj"""
        self._elements = []
        self._datastore = get_datastore(self._project_id)
        if self._fixed_batch_size:
            self._target_batch_size = self._fixed_batch_size
        else:
            self._batch_sizer = _DynamicBatchSizer()
            self._target_batch_size = self._batch_sizer.get_batch_size(time.time()*1000)

    def process(self, element):
        """Collect elements and process them as a batch"""
        self._elements.append(element)
        if len(self._elements) >= self._target_batch_size:
            return self._flush_batch()

    def finish_bundle(self):
        """Flush any remaining elements"""
        if self._elements:
            objs = self._flush_batch()
            for obj in objs:
                yield WindowedValue(obj, window.MAX_TIMESTAMP, [window.GlobalWindow()])

    def _flush_batch(self):
        """Fetch all of the collected keys from datastore"""
        response, latency_ms = _fetch_keys(
            project_id=self._project_id,
            keys=self._elements,
            datastore=self._datastore,
            throttler=self._throttler,
            rpc_stats_callback=self._update_rpc_stats)
        logging.info("Successfully read %d keys in %dms.", len(self._elements), latency_ms)

        if not self._fixed_batch_size:
            now = time.time()*1000
            self._batch_sizer.report_latency(now, latency_ms, len(self._elements))
            self._target_batch_size = self._batch_sizer.get_batch_size(now)

        self._elements = []
        return [entity_result.entity for entity_result in response.found]


class LookupEntityFieldFn(LookupKeysFn):
    """
    Looks-up a field on an EntityPb2 object

    Expects a EntityPb2 object as input
    Outputs a tuple, where the first element is the input object and the second element is the object found during the
    lookup
    """

    def __init__(self, project_id, field_name, fixed_batch_size=None):
        super(LookupEntityFieldFn, self).__init__(project_id=project_id, fixed_batch_size=fixed_batch_size)
        self._field_name = field_name

    @staticmethod
    def _pb2_key_value_to_tuple(kv):
        """Converts a key_value object into a tuple, so that it can be a dictionary key"""
        path = []
        for p in kv.path:
            path.append(p.name)
            path.append(p.id)
        return tuple(path)

    def _flush_batch(self):
        _elements = self._elements
        keys_to_fetch = []
        for element in self._elements:
            kv = element.properties.get(self._field_name, None)
            if kv and kv.key_value and kv.key_value.path:
                keys_to_fetch.append(kv.key_value)
        self._elements = keys_to_fetch

        read_keys = super(LookupEntityFieldFn, self)._flush_batch()
        _by_key = {self._pb2_key_value_to_tuple(entity.key): entity for entity in read_keys}

        output_pairs = []
        for input_obj in _elements:
            kv = input_obj.properties.get(self._field_name, None)
            output_obj = None
            if kv and kv.key_value and kv.key_value.path:
                output_obj = _by_key.get(self._pb2_key_value_to_tuple(kv.key_value), None)
            output_pairs.append((input_obj, output_obj))
        return output_pairs
The key to this is the line response = datastore.lookup(request), where:
datastore = get_datastore(project_id) (from apache_beam.io.gcp.datastore.v1.helper.get_datastore)
request is a LookupRequest from google.cloud.proto.datastore.v1.datastore_pb2
response is LookupResponse from google.cloud.proto.datastore.v1.datastore_pb2
The rest of the above code does things like:
using a single connection to the datastore for a DoFn bundle
batching keys together before performing a lookup request
throttling interactions with the datastore if requests start to fail
(honestly I don't know how critical these bits are, I just came across them when browsing the apache_beam source code)
The resulting util LookupEntityFieldFn(project_id, field_name) is a DoFn that takes an entity_pb2 object as input, extracts and fetches the key property stored on the field field_name, and outputs the result as a tuple (the fetched entity is paired with the input object).
My pipeline code then became:
def format(element):  # element is a tuple of `entity_pb2` objects
    kind_b_element, kind_a_element = element
    return ",".join([
        kind_b_element.properties.get('field_b1', None).string_value,
        kind_b_element.properties.get('field_b2', None).string_value,
        kind_a_element.properties.get('field_a1', None).string_value if kind_a_element else '',
        kind_a_element.properties.get('field_a2', None).string_value if kind_a_element else '',
    ])
def build_pipeline(project, start_date, end_date, export_path):
    query = query_pb2.Query()
    query.kind.add().name = 'KindB'
    filter_1 = datastore_helper.set_property_filter(query_pb2.Filter(), 'field_b1', PropertyFilter.GREATER_THAN, start_date)
    filter_2 = datastore_helper.set_property_filter(query_pb2.Filter(), 'field_b1', PropertyFilter.LESS_THAN, end_date)
    datastore_helper.set_composite_filter(query.filter, CompositeFilter.AND, filter_1, filter_2)

    p = beam.Pipeline(options=pipeline_options)
    _ = (p
         | 'read from datastore' >> ReadFromDatastore(project, query, None)
         | 'extract field' >> apache_beam.ParDo(LookupEntityFieldFn(project_id=project, field_name='key_to_kind_a'))
         | 'format' >> beam.Map(format)
         | 'write' >> apache_beam.io.WriteToText(
               file_path_prefix=export_path,
               file_name_suffix='.csv',
               header='field_b1,field_b2,field_a1,field_a2',
               num_shards=1)
         )
    return p
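As a brief usage note (an assumption about how the pipeline is driven, not something stated in the post), the object returned by build_pipeline still has to be run, for example:

    # hypothetical driver code; the argument values are placeholders
    pipeline = build_pipeline(project='my-project',
                              start_date='2019-01-01',
                              end_date='2019-02-01',
                              export_path='gs://my-bucket/kind_b_export')
    result = pipeline.run()          # submit the Beam pipeline
    result.wait_until_finish()       # block until the CSV shards have been written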
I have the below PeopleCode step in an Application Engine program that reads a CSV file using a File Layout and then inserts the data into a table, and I am just trying to get a better understanding of how the line of code &SQL1 = CreateSQL("%Insert(:1)"); in the below script gets generated. It looks like CreateSQL is using a bind variable (:1) inside the Insert statement, but I am struggling to find where this variable is defined in the program.
Function EditRecord(&REC As Record) Returns boolean;
   Local integer &E;
   &REC.ExecuteEdits(%Edit_Required + %Edit_DateRange + %Edit_YesNo + %Edit_OneZero);
   If &REC.IsEditError Then
      For &E = 1 To &REC.FieldCount
         &MYFIELD = &REC.GetField(&E);
         If &MYFIELD.EditError Then
            &MSGNUM = &MYFIELD.MessageNumber;
            &MSGSET = &MYFIELD.MessageSetNumber;
            &LOGFILE.WriteLine("****Record:" | &REC.Name | ", Field:" | &MYFIELD.Name);
            &LOGFILE.WriteLine("****" | MsgGet(&MSGSET, &MSGNUM, ""));
         End-If;
      End-For;
      Return False;
   Else
      Return True;
   End-If;
End-Function;

Function ImportSegment(&RS2 As Rowset, &RSParent As Rowset)
   Local Rowset &RS1, &RSP;
   Local string &RecordName;
   Local Record &REC2, &RECP;
   Local SQL &SQL1;
   Local integer &I, &L;
   &SQL1 = CreateSQL("%Insert(:1)");
   rem &SQL1 = CreateSQL("%Insert(:1) Order by COUNT_ORDER");
   &RecordName = "RECORD." | &RS2.DBRecordName;
   &REC2 = CreateRecord(@(&RecordName));
   &RECP = &RSParent(1).GetRecord(@(&RecordName));
   For &I = 1 To &RS2.ActiveRowCount
      &RS2(&I).GetRecord(1).CopyFieldsTo(&REC2);
      If (EditRecord(&REC2)) Then
         &SQL1.Execute(&REC2);
         &RS2(&I).GetRecord(1).CopyFieldsTo(&RECP);
         For &L = 1 To &RS2.GetRow(&I).ChildCount
            &RS1 = &RS2.GetRow(&I).GetRowset(&L);
            If (&RS1 <> Null) Then
               &RSP = &RSParent.GetRow(1).GetRowset(&L);
               ImportSegment(&RS1, &RSP);
            End-If;
         End-For;
         If &RSParent.ActiveRowCount > 0 Then
            &RSParent.DeleteRow(1);
         End-If;
      Else
         &LOGFILE.WriteRowset(&RS);
         &LOGFILE.WriteLine("****Correct error in this record and delete all error messages");
         &LOGFILE.WriteRecord(&REC2);
         For &L = 1 To &RS2.GetRow(&I).ChildCount
            &RS1 = &RS2.GetRow(&I).GetRowset(&L);
            If (&RS1 <> Null) Then
               &LOGFILE.WriteRowset(&RS1);
            End-If;
         End-For;
      End-If;
   End-For;
End-Function;

rem *****************************************************************;
rem * PeopleCode to Import Data                                     *;
rem *****************************************************************;
Local File &FILE1, &FILE3;
Local Record &REC1;
Local SQL &SQL1;
Local Rowset &RS1, &RS2;
Local integer &M;

&FILE1 = GetFile("\\nt115\apps\interface_prod\interface_in\Item_Loader\ItemPriceFile.csv", "r", "a", %FilePath_Absolute);
&LOGFILE = GetFile("\\nt115\apps\interface_prod\interface_in\Item_Loader\ItemPriceFile.txt", "r", "a", %FilePath_Absolute);
&FILE1.SetFileLayout(FileLayout.GH_ITM_PR_UPDT);
&LOGFILE.SetFileLayout(FileLayout.GH_ITM_PR_UPDT);
&RS1 = &FILE1.CreateRowset();
&RS = CreateRowset(Record.GH_ITM_PR_UPDT);
REM &SQL1 = CreateSQL("%Insert(:1)");
&SQL1 = CreateSQL("%Insert(:1)");

/* Skip Header Row: The following line of code reads the first line in the file layout (the header)
   and does nothing. Then the pointer goes to the next line in the file and starts using the
   file.readrowset */
&some_boolean = &FILE1.ReadLine(&string);

&RS1 = &FILE1.ReadRowset();
While &RS1 <> Null
   ImportSegment(&RS1, &RS);
   &RS1 = &FILE1.ReadRowset();
End-While;

&FILE1.Close();
&LOGFILE.Close();
The :1 is coming from the line further down &SQL1.Execute(&REC2);
&REC2 gets assigned a record object, so the line &SQL1.Execute(&REC2); evaluates to %Insert(your_record_object)
Here is a simple example that's doing basically the same thing
Here is a description of %Insert
Answer because too long to comment:
The table name is most likely (PS_)GH_ITM_PR_UPDT. The general consensus is to name the FileLayout the same as the record it is based on.
If not, it is defined in FileLayout.GH_ITM_PR_UPDT. Open the FileLayout, right click the segment and under 'Selected Node Properties' you will find the 'File Record Name'.
In your code this record is carried over into &RS1.
&FILE1.SetFileLayout(FileLayout.GH_ITM_PR_UPDT);
&RS1 = &FILE1.CreateRowset();
The rowset is a collection of rows. A row consists of records, and a record is a row of data from a database table. (PeopleSoft object data types are fun...)
This rowset is filled with data in the following statement:
&RS1 = &FILE1.ReadRowset();
This uses your file as input and outputs a rowset collection, mapping the data to records based on how you defined your FileLayout.
The result is fed into the ImportSegment function:
ImportSegment(&RS1, &RS);
Function ImportSegment(&RS2 As Rowset, &RSParent As Rowset)
&RS2 in the function is a reference to &RS1 in the rest of your code.
The table name is also hidden here:
&RecordName = "RECORD." | &RS2.DBRecordName;
So if you can't or don't want to check the FileLayout, you could output &RS2.DBRecordName with a MessageBox, and your answer will be in the Message Log of your Process Monitor.
Finally a record object is created for this database table and it is filled with a row from the rowset. This record is inserted into the database table:
&REC2 = CreateRecord(#(&RecordName));
&RS2(&I).GetRecord(1).CopyFieldsTo(&REC2);
&SQL1 = CreateSQL("%Insert(:1)");
&SQL1.Execute(&REC2);
TLDR:
The table name can be found in the FileLayout, or output in the ImportSegment function as &RS2.DBRecordName.
While executing the following code I'm getting the error below. Just for information, matchObj here returns tuple values.
$ ./ftpParser3_re_dup.py
Traceback (most recent call last):
File "./ftpParser3_re_dup.py", line 13, in <module>
print("{0:<30}{1:<20}{2:<50}{3:<15}".format("FTP ACCOUNT","Account Type","Term Flag"))
IndexError: tuple index out of range
Code is below:
from __future__ import print_function
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
import re

with open('all_adta', 'r') as f:
    for line in f:
        line = line.strip()
    data = f.read()

# Making description & termflag optional in the regex pattern as it's missing in the "data_test" file with several occurrences.
regex = (r"dn:(.*?)\nftpuser: (.*)\n(?:description:* (.*))?\n(?:termflag:* (.*))")
matchObj = re.findall(regex, data)

print("{0:<30}{1:<20}{2:<50}{3:<15}".format("FTP ACCOUNT","Account Type","Term Flag"))
print("{0:<30}{1:<20}{2:<50}{3:<15}".format("-----------","------------","--------"))

for index in matchObj:
    index_str = ' '.join(index)
    new_str = re.sub(r'[=,]', ' ', index_str)
    new_str = new_str.split()
    # In the below print statement we use "index[2]" because index is a tuple here: findall() returns
    # the matches as a list, however with groups it returns them as a list of tuples.
    print("{0:<30}{1:<20}{2:<50}{3:<15}".format(new_str[1],new_str[8],index[2],index[3]))
In the line print("{0:<30}{1:<20}{2:<50}{3:<15}".format("FTP ACCOUNT","Account Type","Term Flag")) you have specified 4 format fields but supplied only 3 arguments, i.e. "FTP ACCOUNT", "Account Type", "Term Flag".
Remove the 4th format field or add a 4th argument.
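For example, since the data rows printed later in the loop have four columns, the header probably needs a fourth argument as well (the "Description" label here is an assumption based on the regex groups, so rename it to whatever fits your data):

    # Option 1: supply a fourth argument so all four format fields are filled
    print("{0:<30}{1:<20}{2:<50}{3:<15}".format("FTP ACCOUNT", "Account Type", "Description", "Term Flag"))

    # Option 2: keep three columns by dropping the fourth format field
    print("{0:<30}{1:<20}{2:<50}".format("FTP ACCOUNT", "Account Type", "Term Flag"))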
import wx
import sqlite3


class Frame(wx.Frame):
    def __init__(self):
        wx.Frame.__init__(self, None)
        self.panel = wx.Panel(self)
        self.text = wx.StaticText(self.panel)
        self.conn = sqlite3.connect("test.db")
        self.cursor = self.conn.cursor()
        self.autoRefersh()

    def autoRefersh(self):
        self.LoadList()
        wx.CallLater(1000, self.autoRefersh)

    def LoadList(self):
        self.cursor.execute("SELECT *FROM CLINIC1")
        for date1 in self.cursor: pass
        self.staticText2_1 = wx.StaticText(self.panel, label=date1[1], style=wx.ALIGN_CENTER, pos=(100,100))


if __name__ == '__main__':
    app = wx.App()
    frame = Frame()
    frame.Show()
    app.MainLoop()
I save the combobox data with sqlite3, so why does the panel display look different and buggy?
I do not know why this is happening.
You missed one crucial step: getting the data itself.
You are using the cursor object, not the data returned by the cursor.
def LoadList(self):
    self.cursor.execute("SELECT *FROM CLINIC1")
    data = self.cursor.fetchall()
    for date1 in data: pass
    self.staticText2_1 = wx.StaticText(self.panel, label=date1[1], style=wx.ALIGN_CENTER, pos=(100,100))
As you are "passing" in your for loop, perhaps what you actually want is only a single record, in which case use:
data = self.cursor.fetchone()
and drop the for loop
Even better, read the tutorial
https://www.blog.pythonlibrary.org/2012/07/18/python-a-simple-step-by-step-sqlite-tutorial/
In the heading of your question you mention combobox, so I assume that you want to replace the statictext with a combobox. The following should get you started, I'll leave the wx.EVT_COMBOBOX event binding for you to add, as you will need it to do something when you select an item.
import wx
import sqlite3


class Frame(wx.Frame):
    def __init__(self):
        wx.Frame.__init__(self, None)
        self.selected_data = []
        self.panel = wx.Panel(self)
        self.combo = wx.ComboBox(self.panel, -1, choices=self.selected_data, size=(130,30))
        self.conn = sqlite3.connect("test.db")
        self.cursor = self.conn.cursor()
        self.combo.SetValue("Choose an Item")
        self.autoRefresh()

    def autoRefresh(self):
        self.LoadList()

    def LoadList(self):
        self.combo.Clear()
        self.cursor.execute("SELECT * FROM CLINIC1")
        data = self.cursor.fetchall()
        for date1 in data:
            self.selected_data.append(date1[1])
        for i in self.selected_data:
            self.combo.Append(i)


if __name__ == '__main__':
    app = wx.App()
    frame = Frame()
    frame.Show()
    app.MainLoop()
Edit:
It should look like this.
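As a hedged sketch of the wx.EVT_COMBOBOX binding left as an exercise above (the handler name and its body are illustrative, not part of the original answer), it could look like this:

    # in Frame.__init__, after creating self.combo: bind the selection event
    self.combo.Bind(wx.EVT_COMBOBOX, self.OnComboSelect)

    # a handler method on the Frame class
    def OnComboSelect(self, event):
        choice = self.combo.GetValue()   # text of the selected item
        print("Selected:", choice)       # replace with whatever should happen on selection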