I have a program that I want to distribute to several customers, each with their own name, logo, and set of modes. Each customer needs 3 different files, so with 20 customers I would need 60 spec files. I have tried to create a single .spec file that builds everything in one compile, but it just overwrites the same file and I end up with 1 executable.
Is it possible to create multiple executables with PyInstaller from a single .py using only 1 .spec file?
I tried creating a list with all the EXE, PYZ, and so on, but still only 1 executable gets created. In the build folder created afterwards I can see multiple .toc, .pyz, and .pkg files with different names (00, 01, 02, and so on), so it seems this should be possible, but I'm missing something.
What am I doing wrong?
I'm using Python 3.6 and PyInstaller 3.5.
# -*- mode: python -*-
import glob
import os
import sys

block_cipher = None

companies_to_create = ["CustomerA", "CustomerB"]
modes = ["config", "service"]

# Testing lists, static (2 companies x 2 modes = 4 builds)
list_PYZ = [None] * 4
list_EXE = [None] * 4
list_A = [None] * 4

app_nr = 0
for company in companies_to_create:
    for mode in modes:
        # Changes mode in file to create
        with open("mainprogram/mode.py", "w") as f_mode:
            f_mode.write('mode="' + mode + '"')

        # Changes company in file to create
        with open("mainprogram/company.py", "w") as f_company:
            f_company.write('company="' + company + '"')

        from PyInstaller.utils.hooks import collect_submodules, collect_data_files
        from mainprogram.brands import company_app

        hiddenimports = collect_submodules('pubsub')

        list_A[app_nr] = Analysis(['mainprogram/mainprogram.py'],
                                  pathex=[],
                                  binaries=[],
                                  hiddenimports=hiddenimports,
                                  hookspath=[],
                                  runtime_hooks=[],
                                  excludes=['tkinter'],
                                  win_no_prefer_redirects=False,
                                  win_private_assemblies=False,
                                  cipher=block_cipher)

        images = glob.glob("mainprogram/images/**/*", recursive=True)
        icons = glob.glob("mainprogram/icons/**/*", recursive=True)
        files = images + icons
        for f in files:
            if os.path.isfile(f):
                path = f.split('mainprogram/')[1]
                list_A[app_nr].datas += [(path, f, 'Data')]

        list_PYZ[app_nr] = PYZ(list_A[app_nr].pure, list_A[app_nr].zipped_data, cipher=block_cipher)

        list_EXE[app_nr] = EXE(list_PYZ[app_nr],
                               list_A[app_nr].scripts,
                               list_A[app_nr].binaries,
                               list_A[app_nr].zipfiles,
                               list_A[app_nr].datas,
                               name=company_app.exe_name,
                               debug=False,
                               strip=False,
                               upx=False,
                               console=False,
                               icon='mainprogram/icons/' + company + '.ico')
        app_nr += 1
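Two things seem worth noting here. First, from mainprogram.brands import company_app inside the loop only executes the module's code on the first iteration; Python caches the import, so if company_app derives exe_name from the freshly rewritten company.py, it will keep its first value and every EXE ends up with the same name, which would produce exactly the overwriting described above. Second, PyInstaller itself does build several EXE targets from one spec file, as long as each EXE gets a distinct name. A minimal sketch of that shape (the target names here are made up for illustration; Analysis/PYZ/EXE are provided in the spec namespace by PyInstaller):

block_cipher = None

for target_name in ['CustomerA_config', 'CustomerA_service']:  # hypothetical names
    a = Analysis(['mainprogram/mainprogram.py'], cipher=block_cipher)
    pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)
    exe = EXE(pyz, a.scripts, a.binaries, a.zipfiles, a.datas,
              name=target_name,  # a distinct name per EXE is what prevents the overwrite
              console=False)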
I'm having an issue where the final main.exe launches fine, but when I try to access one of the files inside the "examples" folder, it cannot find it. My file structure can be found here: https://github.com/Zach10a/PteraSoftware/tree/create_gui
My .spec file can also be found there, but it is as follows:
# -*- mode: python ; coding: utf-8 -*-
import os
import sys

import PySide2
from PyInstaller.building.api import PYZ, EXE, COLLECT
from PyInstaller.building.build_main import Analysis

block_cipher = None
one_dir_mode = True
binaries = []

if sys.platform.startswith('win'):
    qt_plugins_path = os.path.join(PySide2.__path__[0], "plugins")
    binaries = [
        (os.path.join(PySide2.__path__[0], "plugins"), 'PySide2')
    ]
elif sys.platform.startswith('linux'):
    qt_plugins_path = os.path.join(PySide2.__path__[0], "Qt", "plugins", "platforms")
    binaries = [
        (os.path.join(sys.base_prefix, "lib", "libspatialindex_c.so"), '.'),
        # (os.path.join(PySide2.__path__[0], "Qt", "plugins", "platforms"), '.')
    ]

upx = False  # UPX does not play well with anything Qt
upx_exclude = [
    'PySide2',
    'qwindows.dll'
]

a = Analysis(
    ['main.py'],
    pathex=[],
    binaries=[],
    datas=[('docs/Logo.ico', 'docs'),
           ('docs/Black_Text_Logo.ico', 'docs'),
           ('docs/Logo.png', 'docs'),
           ('docs/Black_Text_Logo.png', 'docs'),
           ("README.md", '.')
           # ('examples/analyze_steady_trim_example.py', 'examples'),
           # ('examples/analyze_unsteady_trim_example.py', 'examples'),
           # ('examples/steady_convergence_example.py', 'examples'),
           # ('examples/steady_horseshoe_vortex_lattice_method_solver.py', 'examples'),
           # ('examples/steady_ring_vortex_lattice_method_solver.py', 'examples'),
           # ('examples/unsteady_ring_vortex_lattice_method_solver_static.py', 'examples'),
           # ('examples/unsteady_ring_vortex_lattice_method_solver_variable.py', 'examples'),
           # ('examples/unsteady_ring_vortex_lattice_method_solver_variable_formation.py', 'examples'),
           # ('examples/unsteady_static_convergence_example.py', 'examples'),
           # ('examples/unsteady_variable_convergence_example.py', 'examples'),
           ],
    hiddenimports=['vtkmodules', 'vtkmodules.all', 'vtkmodules.qt.QVTKRenderWindowInteractor',
                   'vtkmodules.util', 'vtkmodules.util.numpy_support', 'examples'],
    hookspath=[],
    hooksconfig={},
    runtime_hooks=[],
    excludes=[],
    win_no_prefer_redirects=False,
    win_private_assemblies=False,
    cipher=block_cipher,
    noarchive=False,
)

pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)

exe = EXE(
    pyz,
    a.scripts,
    [],
    exclude_binaries=True,
    name='main',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    console=True,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
    icon='docs/Logo.ico'
)

coll = COLLECT(
    exe,
    a.binaries,
    a.zipfiles,
    a.datas,
    strip=False,
    upx=True,
    upx_exclude=[],
    name='main',
)
I get the following traceback and error message:
Builtin modules imported
QTCore imported
QtGUI imported
Traceback (most recent call last):
File "main.py", line 29, in <lambda>
File "main.py", line 42, in exampleMenu
FileNotFoundError: [WinError 3] The system cannot find the path specified: 'examples'
I've tried adding them all in as datas in the Analysis section (as you can see above, they are commented out so I can reproduce the error message), but that's not exactly scalable and feels very sloppy. I think it has something to do with using importlib.import_module(file) in main.py to import those example files as modules.
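If it helps, two PyInstaller behaviours seem relevant here (stated as likely causes, not a confirmed fix). A datas tuple can name a whole directory, so ('examples', 'examples') would bundle the folder without listing every file. And at run time, a bundled app has to resolve data paths against PyInstaller's base directory rather than the current working directory, usually via a small helper like this sketch:

import os
import sys

def resource_path(relative):
    # sys._MEIPASS is set by the PyInstaller bootloader; fall back to the
    # source directory when running unfrozen.
    base = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))
    return os.path.join(base, relative)

examples_dir = resource_path('examples')  # instead of the bare relative 'examples'

Note also that importlib.import_module calls are invisible to PyInstaller's static analysis, so the example modules still need to be listed under hiddenimports, or gathered with collect_submodules('examples') from PyInstaller.utils.hooks.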
I need to download multiple 10-K documents. This code works fine if I download the 10-Ks for between 5 and 10 companies, but it fails if I increase the number of companies in cik_lookup. Here's the code.
import nltk
import numpy as np
import pandas as pd
import pickle
import pprint
import project_helper
from tqdm import tqdm
Here's the .py file that contains the project_helper functions.
import matplotlib.pyplot as plt
import requests
from ratelimit import limits, sleep_and_retry


class SecAPI(object):
    SEC_CALL_LIMIT = {'calls': 10, 'seconds': 1}

    @staticmethod
    @sleep_and_retry
    # Dividing the call limit by half to avoid coming close to the limit
    @limits(calls=SEC_CALL_LIMIT['calls'] / 2, period=SEC_CALL_LIMIT['seconds'])
    def _call_sec(url):
        return requests.get(url)

    def get(self, url):
        return self._call_sec(url).text


def print_ten_k_data(ten_k_data, fields, field_length_limit=50):
    indentation = '  '
    print('[')
    for ten_k in ten_k_data:
        print_statement = '{}{{'.format(indentation)
        for field in fields:
            value = str(ten_k[field])

            # Show return lines in output
            if isinstance(value, str):
                value_str = '\'{}\''.format(value.replace('\n', '\\n'))
            else:
                value_str = str(value)

            # Cut off the string if it gets too long
            if len(value_str) > field_length_limit:
                value_str = value_str[:field_length_limit] + '...'

            print_statement += '\n{}{}: {}'.format(indentation * 2, field, value_str)

        print_statement += '},'
        print(print_statement)
    print(']')
The first step is to download the NLP corpora.
nltk.download('stopwords')
nltk.download('wordnet')
Then get the 10-Ks.
#cik_lookup = {
# 'GOOGL':'0001288776',
# 'AAPL':'0000320193',
# 'FACEBOOK':'0001326801',
# 'AMZN':'0001018724',
# 'MSFT':'0000789019'}
cik_lookup = {
'AEP': '0000004904',
'AXP': '0000004962',
'BA': '0000012927',
'BK': '0001390777',
'CAT': '0000018230',
'DE': '0000315189',
'DIS': '0001001039',
'DTE': '0000936340',
'ED': '0001047862',
'EMR': '0000032604',
'ETN': '0001551182',
'GE': '0000040545',
'IBM': '0000051143',
'IP': '0000051434',
'JNJ': '0000200406',
'KO': '0000021344',
'LLY': '0000059478',
'MCD': '0000063908',
'MO': '0000764180',
'MRK': '0000310158',
'MRO': '0000101778',
'PCG': '0001004980',
'PEP': '0000077476',
'PFE': '0000078003',
'PG': '0000080424',
'PNR': '0000077360',
'SYY': '0000096021',
'TXN': '0000097476',
'UTX': '0000101829',
'WFC': '0000072971',
'WMT': '0000104169',
'WY': '0000106535',
'XOM': '0000034088'}
Get the list of 10-Ks:
sec_api = project_helper.SecAPI()

from bs4 import BeautifulSoup


def get_sec_data(cik, doc_type, start=0, count=60):
    newest_pricing_data = pd.to_datetime('2021-01-01')
    rss_url = 'https://www.sec.gov/cgi-bin/browse-edgar?action=getcompany' \
              '&CIK={}&type={}&start={}&count={}&owner=exclude&output=atom' \
              .format(cik, doc_type, start, count)
    sec_data = sec_api.get(rss_url)
    feed = BeautifulSoup(sec_data.encode('utf-8'), 'xml').feed
    entries = [
        (
            entry.content.find('filing-href').getText(),
            entry.content.find('filing-type').getText(),
            entry.content.find('filing-date').getText())
        for entry in feed.find_all('entry', recursive=False)
        if pd.to_datetime(entry.content.find('filing-date').getText()) <= newest_pricing_data]

    return entries


example_ticker = 'AEP'
sec_data = {}

for ticker, cik in cik_lookup.items():
    sec_data[ticker] = get_sec_data(cik, '10-K')
The code works fine if I download the 10-Ks for between 5 and 10 companies, but if I increase the number of companies in cik_lookup I get the following errors. The first error I got is below.
UnicodeEncodeError Traceback (most recent call last)
<ipython-input-8-28a784054794> in <module>()
20
21 for ticker, cik in cik_lookup.items():
---> 22 sec_data[ticker] = get_sec_data(cik, '10-K')
<ipython-input-8-28a784054794> in get_sec_data(cik, doc_type, start, count)
5 rss_url = 'https://www.sec.gov/cgi-bin/browse-edgar?action=getcompany' '&CIK={}&type={}&start={}&count={}&owner=exclude&output=atom' .format(cik, doc_type, start, count)
6 sec_data = sec_api.get(rss_url)
----> 7 feed = BeautifulSoup(sec_data.encode('ascii'), 'xml').feed
8 entries = [
9 (
UnicodeEncodeError: 'ascii' codec can't encode characters in position 2599-2601: ordinal not in range(128)
However, after some googling around BeautifulSoup encodings, I changed it to utf-8 and then got the following error.
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-9-9c77ed07af2d> in <module>()
20
21 for ticker, cik in cik_lookup.items():
---> 22 sec_data[ticker] = get_sec_data(cik, '10-K')
<ipython-input-9-9c77ed07af2d> in get_sec_data(cik, doc_type, start, count)
11 entry.content.find('filing-type').getText(),
12 entry.content.find('filing-date').getText())
---> 13 for entry in feed.find_all('entry', recursive=False)
14 if pd.to_datetime(entry.content.find('filing-date').getText()) <= newest_pricing_data]
15
AttributeError: 'NoneType' object has no attribute 'find_all'
The project can be accessed at the following GitHub repo.
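A hedged guess at what changes with more companies: more requests in quick succession. With the rate-limit decorators disabled, nothing throttles the calls, and SEC EDGAR's fair-access rules both cap clients at 10 requests per second and expect a declared User-Agent; once EDGAR starts serving an error page instead of the Atom feed, BeautifulSoup(...).feed is None and the AttributeError above follows. A minimal sketch of a throttled, identified request (the User-Agent string is a placeholder you would replace with your own details):

import requests
from ratelimit import limits, sleep_and_retry

HEADERS = {'User-Agent': 'Sample Company admin@example.com'}  # placeholder identity

@sleep_and_retry
@limits(calls=5, period=1)  # stay well under EDGAR's 10-requests-per-second cap
def call_sec(url):
    response = requests.get(url, headers=HEADERS)
    response.raise_for_status()  # surface HTTP errors instead of parsing an error page
    return response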
I would like to do some statistical analysis with Python on the live casino game called Crazy Time from Evolution Gaming. There is a website that has the data to do this: https://tracksino.com/crazytime. I want the data from the 'Spin History' table at the bottom of the page to be imported into Excel. However, I do not know how this can be done. Could anyone give me an idea where to start?
Thanks in advance!
Try the below code:
import json
import requests
from urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
import csv
import datetime


def scrap_history():
    csv_headers = []
    file_path = ''  # mention the path on your system where the file should be saved
    file_name = 'spin_history.csv'  # file name
    page_number = 1

    while True:
        # Dynamic URL, fetching data in chunks of 100
        url = 'https://api.tracksino.com/crazytime_history?filter=&sort_by=&sort_desc=false&page_num=' \
              + str(page_number) + '&per_page=100&period=24hours'

        print('-' * 100)
        print('URL created : ', url)

        response = requests.get(url, verify=False)
        result = json.loads(response.text)  # load the response text as JSON
        history_data = result['data']
        print(history_data)

        if history_data != []:
            with open(file_path + file_name, 'a+') as history:
                # Headers for the file
                csv_headers = ['Occurred At', 'Slot Result', 'Spin Result', 'Total Winners', 'Total Payout']
                csvwriter = csv.DictWriter(history, delimiter=',', lineterminator='\n', fieldnames=csv_headers)

                if page_number == 1:
                    print('Writing CSV header now...')
                    csvwriter.writeheader()

                # Write the extracted data into the CSV file row by row
                for item in history_data:
                    value = datetime.datetime.fromtimestamp(item['when'])
                    occurred_at = f'{value:%d-%B-%Y @ %H:%M:%S}'

                    csvwriter.writerow({'Occurred At': occurred_at,
                                        'Slot Result': item['slot_result'],
                                        'Spin Result': item['result'],
                                        'Total Winners': item['total_winners'],
                                        'Total Payout': item['total_payout'],
                                        })
            print('-' * 100)
            page_number += 1
            print(page_number)
            print('-' * 100)
        else:
            break
Explanation:
I implemented the above script using Python's requests library. The API URL https://api.tracksino.com/crazytime_history?filter=&sort_by=&sort_desc=false&page_num=1&per_page=50&period=24hours was extracted from the website itself. The script builds a dynamic URL in which the page number changes on every iteration: first page_num=1, then page_num=2, and so on, until all the data has been extracted.
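For completeness, a minimal way to invoke the function above (assuming the tracksino endpoint still responds as described):

if __name__ == '__main__':
    scrap_history()  # writes spin_history.csv into file_path + file_name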
I wrote some R scripts, and I'd like to use Snakemake to integrate them into an analysis pipeline. I have almost finished the pipeline, except for one of the R scripts. In this R script, one of the parameters is a list, like this:
group=list(A=c("a","b","c"),B=c("d","e"),C=c("f","g","h"))
I don't know how to pass this kind of parameter in Snakemake.
The R script and Snakemake rule I wrote are as follows:
R script:
library(optparse)
library(ggtree)
library(ggplot2)
library(colorspace)

# help doc
option_list=list(
    make_option("--input",type="character",help="<file> input file"),
    make_option("--output",type="character",help="<file> output file"),
    make_option("--families",type="character",help="<list> a list containing classified families"),
    make_option("--wide",type="numeric",help="<num> width of figure"),
    make_option("--high",type="numeric",help="<num> height of figure"),
    make_option("--labsize",type="numeric",help="<num> size of tip label")
)
opt_parser=OptionParser(usage="\n\nName: cluster_vil.r",
                        description="\nDescription: This script is to visualize the result of cluster analysis.
                        \nContact: huisu<hsu@kangpusen.com>
                        \nDate: 9.5.2019",
                        option_list=option_list,
                        epilogue="Example: Rscript cluster_vil.r --input mega_IBSout_male.nwk
                        --output NJ_IBS_male.ggtree.pdf
                        --families list(Family_A=c('3005','3021','3009','3119'),Family_B=c('W','4023'),Family_C=c('810','3003'),Family_D=c('4019','1001','4015','4021'),Family_E=c('4017','3115'))
                        --wide 18
                        --high 8
                        --labsize 7"
)
opt=parse_args(opt_parser)

input=opt$input
output=opt$output
families=opt$families
wide=opt$wide
high=opt$high
labsize=opt$labsize

# start plot
nwk=read.tree(input)
tree=groupOTU(nwk, families)
pdf(file=output,width=wide,height=high)  # 18,8 for male samples; 12,18 for all samples
ggtree(tree,aes(color=group),branch.length='none') + geom_tiplab(size=labsize) +
    theme(legend.position=("left"),legend.text=element_text(size=12),legend.title=element_text(size=18),
          legend.key.width=unit(0.5,"inches"),legend.key.height=unit(0.3,"inches")) +
    scale_color_manual(values=c("black", rainbow_hcl(length(families)))) +
    theme(plot.margin=unit(rep(2,4),'cm'))
dev.off()
Snakemake:
rule cluster_virual:
    input:
        nwk="mega_IBS.nwk",
    output:
        all="mega_IBS.pdf",
    params:
        fam=collections.OrderedDict([('Family_A', ['3005', '3021', '3009', '3119']),
                                     ('Family_B', ['W', '4023']),
                                     ('Family_C', ['810', '3003']),
                                     ('Family_D', ["4019", "1001", "4015", "4021"]),
                                     ('Family_E', ["4017", "3115"])])
    message:
        "====cluster analysis visualization===="
    shell:
        "Rscript Rfunction/cluster_vil.r "
        "--input {input.nwk} "
        "--output {output.all} "
        "--families {params.fam} "
        "--wide 12 "
        "--high 18 "
        "--labsize 3"
So, I want to know how to properly write the fam parameter in Snakemake.
I think in Python/Snakemake you can use an OrderedDict to represent an R list. So:
params:
    fam=list(A=c('a','b','c'),B=c('d','e'),C=c('f','g','h'))
would be:
params:
    fam=collections.OrderedDict([('A', ['a', 'b', 'c']),
                                 ('B', ['d', 'e']),
                                 ('C', ['f', 'g', 'h'])])
Of course, add import collections to the top of your Snakefile (or wherever you want to import the collections module).
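One caveat worth adding, as an assumption about how the shell directive renders: Snakemake substitutes the str() of a params value into the command, and str() of an OrderedDict is a Python repr, not R's list(...) syntax, so the R script would receive something it cannot parse directly. A sketch of formatting the string on the Snakemake side instead (the helper name r_list is made up):

import collections

def r_list(d):
    # {'A': ['a', 'b']} -> "list(A=c('a','b'))"
    items = ','.join("{}=c({})".format(key, ','.join("'{}'".format(v) for v in values))
                     for key, values in d.items())
    return 'list({})'.format(items)

fam_string = r_list(collections.OrderedDict([('Family_A', ['3005', '3021']),
                                             ('Family_B', ['W', '4023'])]))
# fam_string == "list(Family_A=c('3005','3021'),Family_B=c('W','4023'))"

With params: fam=r_list(...) in the rule, the shell line would still need quotes around the placeholder ("--families '{params.fam}' ") so the parentheses and quotes survive the shell, and on the R side a character string received this way still has to be turned into an actual list, e.g. with eval(parse(text=families)).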
I'm trying to move files to HDFS.
And this is my config file:
# Naming the components on the current agent.
FileAgent.sources = File
FileAgent.channels = MemChannel
FileAgent.sinks = HDFS
# Configuring the source
FileAgent.sources.File.type = spooldir
FileAgent.sources.File.spoolDir = /usr/lib/flume/spooldir
# Describing/Configuring the sink
FileAgent.sinks.HDFS.type = hdfs
FileAgent.sinks.HDFS.hdfs.path = hdfs://192.168.1.31:8020/user/Flume/
FileAgent.sinks.HDFS.hdfs.fileType = DataStream
FileAgent.sinks.HDFS.hdfs.writeFormat = Text
FileAgent.sinks.HDFS.hdfs.batchSize = 1000
FileAgent.sinks.HDFS.hdfs.rollSize = 0
FileAgent.sinks.HDFS.hdfs.rollCount = 10000
# Describing/Configuring the channel
FileAgent.channels.MemChannel.type = memory
FileAgent.channels.MemChannel.capacity = 10000
FileAgent.channels.MemChannel.transactionCapacity = 100
# Binding the source and sink to the channel
FileAgent.sources.File.channels = MemChannel
FileAgent.sinks.HDFS.channel = MemChannel
And it works well. But the files in HDFS get names like FlumeData.1460976871742.
In my case I want to keep the original file name.
How can I keep the original file name in HDFS?
For example, if there is a file test.txt in the directory /usr/lib/flume/spooldir, I want to end up with a file test.txt in HDFS.
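For reference, a sketch against Flume's documented spooldir and HDFS sink properties, not tested on this exact setup: the spooling directory source can put each file's base name into an event header, and hdfs.filePrefix accepts %{header} escapes, so the two can be combined:

# Pass the original base name along as an event header...
FileAgent.sources.File.basenameHeader = true
FileAgent.sources.File.basenameHeaderKey = basename
# ...and use that header as the file prefix on the HDFS side
FileAgent.sinks.HDFS.hdfs.filePrefix = %{basename}

Flume still appends its own counter to each rolled file, so test.txt would land as something like test.txt.1460976871742 rather than a bare test.txt.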