Distribution output in a NumPy array does not match the original plot

I have created a fit via the Gamma_3P distribution with alpha, beta, and gamma parameters.
The problem is that the output y values (red) do not match the distribution (blue).
Thanks in advance for any help. The code follows:
#test numpy plot
from reliability.Distributions import Gamma_Distribution
from reliability.Fitters import Fit_Gamma_3P
import matplotlib.pyplot as plt
import numpy as np
x_min = 0
x_max = 10000
alpha_G = 1827.9715783463666
beta_G = 0.5175119001035541
gamma_G = 0.9999
Gamma_fit_created = Gamma_Distribution(alpha=alpha_G,beta=beta_G, gamma=gamma_G)
y_values = Gamma_fit_created.SF(xmin = x_min, xmax = x_max, label='fit function',color='green', show_plot = False)
#since the output array is 200 values within the limits, I adapted the x values
x_steps = (x_max - x_min)/len(y_values)
x_values = np.arange(x_min, x_max,x_steps)
x_values_plot = x_values[1:]
y_values_plot = y_values[1:]
plt.plot(x_values_plot, y_values_plot, label='fit function created sf 2',color='red')
Gamma_fit_created.SF(xmin = x_min, xmax = x_max, label='fit function correct',color='blue')
plt.xlim(0,10000)
plt.ylim(0,1)
#plt.plot(Gamma_fit_created.SF(xmin = 0, xmax = 10000, label='fit function created sf',color='blue'))
plt.legend()
plt.show()
This is my output. The functions should be the same, but they are different.
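A possible explanation, offered as an assumption rather than a confirmed diagnosis: the x grid rebuilt with np.arange may not line up with the x values the library generates internally between xmin and xmax. A minimal sketch that sidesteps the guessing, assuming the distribution methods accept an explicit xvals argument as documented in recent versions of reliability:

import numpy as np
import matplotlib.pyplot as plt
from reliability.Distributions import Gamma_Distribution

dist = Gamma_Distribution(alpha=1827.9715783463666, beta=0.5175119001035541, gamma=0.9999)
# supply the x grid explicitly so the returned y values correspond to known x positions
x_values = np.linspace(0, 10000, 200)
y_values = dist.SF(xvals=x_values, show_plot=False)  # xvals assumed supported
plt.plot(x_values, y_values, label='fit function created sf', color='red')
dist.SF(xmin=0, xmax=10000, label='fit function correct', color='blue')
plt.legend()
plt.show()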

Related

How to plot the learning curve for my GAN in Python

I want to plot the learning curve (loss & accuracy) for my GAN. I have tried different solutions but still can't plot it. I tried to use the fit and history functions, but I get an error and can't plot the loss & accuracy curves. Can anyone help?
I run my code in Google Colab. Since I only train the discriminator directly, I tried to use the fit and history functions in the definitions of the discriminator and the GAN, but I still get an error.
from tensorflow.keras.layers import Dense, Conv2DTranspose, LeakyReLU, Reshape, BatchNormalization, Activation, Conv2D
from tensorflow.keras.models import Model, Sequential
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.layers import Conv2D, Flatten, Dropout
from tensorflow.keras import optimizers
import tensorflow as tf
def image_generator():
    generator = Sequential()
    generator.add(Dense(256*4*4, input_shape=(100,)))
    #generator.add(BatchNormalization())
    generator.add(LeakyReLU())
    generator.add(Reshape((4,4,256)))
    generator.add(Conv2DTranspose(128, kernel_size=3, strides=2, padding="same"))
    #generator.add(BatchNormalization())
    generator.add(LeakyReLU(alpha=0.2))
    generator.add(Conv2DTranspose(128, kernel_size=3, strides=2, padding="same"))
    #generator.add(BatchNormalization())
    generator.add(LeakyReLU(alpha=0.2))
    generator.add(Conv2DTranspose(128, kernel_size=3, strides=2, padding="same"))
    #generator.add(BatchNormalization())
    generator.add(LeakyReLU(alpha=0.2))
    generator.add(Conv2D(3, kernel_size=3, padding="same", activation='tanh'))
    return generator

model_generator = image_generator()
model_generator.summary()
# Define input data
def generate_data_entry(n_samples):
    X = np.random.randn(100 * n_samples)
    X = X.reshape(n_samples, 100)
    return X

def create_data_fake(model_generator, n_samples):
    noise = generate_data_entry(n_samples)  # avoid shadowing the built-in 'input'
    X = model_generator.predict(noise)
    y = np.zeros((n_samples, 1))
    return X, y

number_samples = 4
X, _ = create_data_fake(model_generator, number_samples)

# Visualize results
for i in range(number_samples):
    plt.subplot(2, 2, 1 + i)
    plt.axis('off')
    plt.imshow(X[i])
def images_discriminator():
    discriminator = Sequential()
    discriminator.add(Conv2D(64, kernel_size=3, padding="same", input_shape=(32,32,3)))
    discriminator.add(LeakyReLU(alpha=0.2))
    #discriminator.add(Dropout(0.2))
    discriminator.add(Conv2D(128, kernel_size=3, strides=(2,2), padding="same"))
    discriminator.add(LeakyReLU(alpha=0.2))
    #discriminator.add(Dropout(0.2))
    discriminator.add(Conv2D(128, kernel_size=3, strides=(2,2), padding="same"))
    discriminator.add(LeakyReLU(alpha=0.2))
    #discriminator.add(Dropout(0.2))
    discriminator.add(Conv2D(256, kernel_size=3, strides=(2,2), padding="same"))
    discriminator.add(LeakyReLU(alpha=0.2))
    #discriminator.add(Dropout(0.2))
    discriminator.add(Flatten())
    discriminator.add(Dropout(0.4))
    discriminator.add(Dense(1, activation='sigmoid'))
    opt = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
    discriminator.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
    return discriminator

model_discriminator = images_discriminator()
model_discriminator.summary()
from tensorflow.keras.datasets import cifar10
import random
import tensorflow as tf
import numpy as np
from tensorflow.keras import backend
import pandas as pd
import matplotlib.pyplot as plt
import keras
from datetime import datetime  # needed by show_images_generated() and evaluate_and_save() below
%matplotlib inline

(Xtrain, Ytrain), (testx, testy) = cifar10.load_data()

def load_images():
    (Xtrain, Ytrain), (testx, testy) = cifar10.load_data()
    # keep only the images of class 0
    indice = np.where(Ytrain == 0)
    indice = indice[0]
    Xtrain = Xtrain[indice, :, :, :]
    # normalize the data to [-1, 1]
    X = Xtrain.astype('float32')
    X = (X - 127.5) / 127.5
    return X

print(load_images().shape)
def load_data_reales(dataset, n_samples):
    ix = np.random.randint(0, dataset.shape[0], n_samples)
    X = dataset[ix]
    y = np.ones((n_samples, 1))
    return X, y

def load_data_fake(n_samples):
    X = np.random.rand(32 * 32 * 3 * n_samples)
    X = -1 + X * 2
    X = X.reshape((n_samples, 32, 32, 3))
    y = np.zeros((n_samples, 1))
    return X, y

def train_discriminator(model, dataset, n_iterations=20, batch=128):
    medium_batch = int(batch/2)
    for i in range(n_iterations):
        X_real, y_real = load_data_reales(dataset, medium_batch)
        los_real, acc_real = model.train_on_batch(X_real, y_real)
        X_fake, y_fake = load_data_fake(medium_batch)
        los_fake, acc_fake = model.train_on_batch(X_fake, y_fake)
        print(str(i+1) + ' Real:' + str(acc_real*100) + ', Fake:' + str(acc_fake*100))

dataset = load_images()
train_discriminator(model_discriminator, dataset)
def my_loss_function(x_real, x_fake):
    la = tf.keras.losses.BinaryCrossentropy()
    return la(x_real, x_fake)

def create_gan(discriminator, generator):
    discriminator.trainable = False
    gan = Sequential()
    gan.add(generator)
    gan.add(discriminator)
    opt = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
    # the combined model must be compiled before train_on_batch can be called on it
    gan.compile(loss='binary_crossentropy', optimizer=opt)
    return gan

gan = create_gan(model_discriminator, model_generator)
gan.summary()
def show_images_generated(data_fake, epoch):
    now = datetime.now()
    now = now.strftime("%Y%m%d_%H%M%S")
    # rescale the data from [-1, 1] to [0, 1]
    data_fake = (data_fake + 1) / 2.0
    for i in range(10):
        plt.imshow(data_fake[i])
        plt.axis('off')
        name = str(epoch) + '_generated_image_' + str(i) + '.png'
        plt.savefig(name, bbox_inches='tight')
        plt.close()

def evaluate_and_save(model_generator, epoch, medium_dataset):
    # save the model
    now = datetime.now()
    now = now.strftime("%Y%m%d_%H%M%S")
    name = str(epoch) + '_' + str(now) + "_model_generator_" + '.h5'
    model_generator.save(name)
    # generate new data
    X_real, Y_real = load_data_reales(dataset, medium_dataset)
    X_fake, Y_fake = create_data_fake(model_generator, medium_dataset)
    # evaluate the model
    _, acc_real = model_discriminator.evaluate(X_real, Y_real)
    _, acc_fake = model_discriminator.evaluate(X_fake, Y_fake)
    print('Acc Real:' + str(acc_real*100) + '% Acc Fake:' + str(acc_fake*100) + '%')
def training(data, model_generator, model_discriminator, epochs, n_batch, beginning=0):
    dimension_batch = int(data.shape[0]/n_batch)
    medium_dataset = int(n_batch/2)
    # iterate over the epochs
    for epoch in range(beginning, beginning + epochs):
        # iterate over all batches
        for batch in range(n_batch):
            # load real data and train the discriminator on it
            X_real, Y_real = load_data_reales(data, medium_dataset)
            cost_discriminator_real, _ = model_discriminator.train_on_batch(X_real, Y_real)
            # create fake data and train the discriminator on it
            X_fake, Y_fake = create_data_fake(model_generator, medium_dataset)
            cost_discriminator_fake, _ = model_discriminator.train_on_batch(X_fake, Y_fake)
            # generate input noise for the GAN
            X_gan = generate_data_entry(medium_dataset)
            Y_gan = np.ones((medium_dataset, 1))
            # train the GAN with fake data labelled as real
            cost_gan = gan.train_on_batch(X_gan, Y_gan)
        # every 10 epochs we show the results and cost
        if (epoch+1) % 10 == 0:
            evaluate_and_save(model_generator, epoch=epoch, medium_dataset=medium_dataset)
            show_images_generated(X_fake, epoch=epoch)

training(dataset, model_generator, model_discriminator, epochs=10, n_batch=128, beginning=0)
X_fake, _ = create_data_fake(n_samples=49, model_generator=model_generator)
X_fake = (X_fake + 1) / 2
for i in range(49):
    plt.subplot(7, 7, i+1)
    plt.axis('off')
    plt.imshow(X_fake[i])

# generate new data
X_real1, Y_real1 = load_data_reales(dataset, 15)
X_fake1, Y_fake1 = create_data_fake(model_generator, 15)
# evaluate the model
_, acc_real = model_discriminator.evaluate(X_real1, Y_real1)
_, acc_fake = model_discriminator.evaluate(X_fake1, Y_fake1)
print('Acc Real:' + str(acc_real*100) + '% Acc Fake:' + str(acc_fake*100) + '%')
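For what it's worth, since this loop trains with train_on_batch rather than fit(), there is no History object to plot from; a common approach is to collect the per-batch losses in plain lists and plot those afterwards. The sketch below is an illustrative addition under that assumption (the list names and the training_with_history wrapper are not part of the original code), reusing the helper functions defined above:

d_loss_history = []   # discriminator loss per batch (illustrative name)
g_loss_history = []   # generator (GAN) loss per batch (illustrative name)

def training_with_history(data, epochs, n_batch, beginning=0):
    medium_dataset = int(n_batch / 2)
    for epoch in range(beginning, beginning + epochs):
        for batch in range(n_batch):
            X_real, Y_real = load_data_reales(data, medium_dataset)
            d_real, _ = model_discriminator.train_on_batch(X_real, Y_real)
            X_fake, Y_fake = create_data_fake(model_generator, medium_dataset)
            d_fake, _ = model_discriminator.train_on_batch(X_fake, Y_fake)
            X_gan = generate_data_entry(medium_dataset)
            Y_gan = np.ones((medium_dataset, 1))
            g_loss = gan.train_on_batch(X_gan, Y_gan)
            # record the losses instead of discarding them
            d_loss_history.append(0.5 * (d_real + d_fake))
            g_loss_history.append(g_loss)

training_with_history(dataset, epochs=10, n_batch=128)

# plot the recorded learning curves
plt.plot(d_loss_history, label='discriminator loss')
plt.plot(g_loss_history, label='generator loss')
plt.xlabel('batch')
plt.ylabel('loss')
plt.legend()
plt.show()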

Plot Gaze Heat Map on PNG

With this code I want to plot a heat map from eye-tracking gaze data (x, y coordinates on screen) on top of a PNG. I got really close with other posts here in the forum. The only problem left is some dots/blurry patches on the output image (see link). Maybe somebody can help?
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter
import pandas as pd
from PIL import Image
with Image.open("/Users/florianteichmann/Desktop/Gaze_Scratch_Paradigm/stimuli_current/trial_image/32space_drop.png") as img3:
    w = img3.width
    h = img3.height

def myplot(x, y, s, bins=1000):
    heatmap, xedges, yedges = np.histogram2d(x, y, bins=bins)
    heatmap = gaussian_filter(heatmap, sigma=s)
    extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
    return heatmap.T, extent

def transparent_cmap(cmap, N=255):
    mycmap = cmap
    mycmap._init()
    mycmap._lut[:, -1] = np.linspace(0, 1, N+4)
    return mycmap

# load the gaze data
df = pd.read_csv('file-path')
x = df['X']
y = df['Y']
mycmap = transparent_cmap(plt.cm.Greens)
img2 = plt.imread('file-path')
#sigmas = [0, 16, 32, 64]
s = 64
fig, ax = plt.subplots()
plt.axis([0, w, 0, h])
img, extent = myplot(x, y, s)
ax.imshow(img2, extent=[0, w, 0, h])
ax.imshow(img, extent=[0, w, 0, h], origin='lower', cmap=mycmap)
plt.show()
[output image]
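A hedged guess at the leftover dots (an assumption, not a confirmed diagnosis): isolated gaze samples survive the Gaussian blur as faint blobs, and the colormap is only fully transparent at exactly zero. Masking low-density bins before drawing keeps those regions fully transparent; the 5% threshold below is arbitrary:

import numpy.ma as ma
# mask bins whose density is under 5% of the peak so they are not drawn at all
img_masked = ma.masked_where(img < 0.05 * img.max(), img)
ax.imshow(img_masked, extent=[0, w, 0, h], origin='lower', cmap=mycmap)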

Custom R visual times out in Power BI

I'm attempting to get an R visualization running in Power BI. It runs fine in R, but for some reason it never finishes loading in Power BI (no error message, just the timeout screen after five minutes). After some experimenting, I've noticed that if I remove one plotly overlay from the create-and-save-widget section, it loads fine. It doesn't matter which one.
I am new to R and Power BI, so any advice on a workaround would be really appreciated.
source('./r_files/flatten_HTML.r')
############### Library Declarations ###############
libraryRequireInstall("ggplot2");
libraryRequireInstall("plotly");
####################################################
################### Actual code ####################
# plot histogram of risk density using monte carlo output
x = Values[,1]; # grab the first column of the dataframe
# create CDF function and overlay onto histogram
cdf = ecdf(x);
# calculate mean coordinates to draw a mean line for the selected data
meancordinates = function(xdata) {
  v = sum(xdata)
  meanxcord = v/length(xdata)
  meancord = list(meanxcord = meanxcord, meanycord = cdf(meanxcord))
  return(meancord)
};
mean = meancordinates(x);

# calculate median coordinates to draw a median line for the selected data
mediancordinates = function(xdata) {
  medianxcord = median(xdata)
  mediancord = list(medianxcord = medianxcord, medianycord = cdf(medianxcord))
  return(mediancord)
};
median = mediancordinates(x)

# calculate the 80% coordinates to draw an 80% line for the selected data
eightycordinates = function(xdata) {
  eightyxcord = x[which(abs(cdf(xdata)-0.80) == min(abs(cdf(xdata)-0.80)))]
  eightycord = list(eightyxcord = eightyxcord, eightyycord = cdf(eightyxcord))
  return(eightycord);
}
eighty = eightycordinates(x);
####################################################
############# Create and save widget ###############
p = plot_ly(x = x, type = "histogram", histnorm = "probability density", name = "Histogram")
p = p %>% add_segments(
  x = median$medianxcord, xend = median$medianxcord,
  y = 0, yend = median$medianycord,
  name = "Median")
p = p %>% add_segments(
  x = eighty$eightyxcord, xend = eighty$eightyxcord,
  y = 0, yend = eighty$eightyycord,
  name = "80%")
p = p %>% add_segments(
  x = mean$meanxcord, xend = mean$meanxcord,
  y = 0, yend = mean$meanycord,
  name = "Mean")
p = p %>% add_lines(x = x, y = cdf(x), name = "CDF");
internalSaveWidget(p, 'out.html');
####################################################
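One workaround to try (offered as an assumption about the timeout, since Power BI gives no error detail): the CDF overlay sends every raw data point to the widget, so a large dataset can make the HTML rendering exceed Power BI's five-minute limit. Thinning that trace to a fixed number of points, in place of the add_lines call above, may help; the 500-point cap below is arbitrary:

# illustrative sketch: thin the CDF overlay to at most ~500 points
xs  = sort(x);
idx = unique(round(seq(1, length(xs), length.out = 500)));
p   = p %>% add_lines(x = xs[idx], y = cdf(xs[idx]), name = "CDF");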

Export Raster from R-INLA

So I am in dire need of help. I have finally managed to construct my R-INLA model and get it to graph as needed via the code below.
First, I create the stacks (note this is the very end of my INLA process; the mesh etc. has already been built):
stk.abdu = inla.stack(
  data = list(y = 1, e = 0), A = list(abdu.mat, 1), tag = 'abdu',
  effects = list(list(i = 1:sc.mesh.5$n),
                 data.frame(Intercept = 1, dwater = winter.abdu$dwater, elev = winter.abdu$elev,
                            forest = winter.abdu$forest, developed = winter.abdu$developed,
                            openwater = winter.abdu$OpenWater, barren = winter.abdu$barren,
                            shrubland = winter.abdu$shrubland, herb = winter.abdu$herb,
                            planted = winter.abdu$planted, wetland = winter.abdu$wetland,
                            dist = winter.abdu$dwater)))
stk.quad = inla.stack(
  data = list(y = 0, e = 0.1), A = list(quad.mat, 1), tag = 'quad',
  effects = list(list(i = 1:sc.mesh.5$n),
                 data.frame(Intercept = 1, dwater = dummy$dwater, elev = dummy$elev,
                            forest = dummy$forest, developed = dummy$developed,
                            openwater = dummy$openwater, barren = dummy$barren,
                            shrubland = dummy$shrubland, herb = dummy$herb,
                            planted = dummy$planted, wetland = dummy$wetland,
                            dist = dummy$dwater)))
stk.prd <- inla.stack(
  data = list(y = NA), A = list(Aprd, 1), tag = 'prd',
  effects = list(list(i = 1:sc.mesh.5$n),
                 data.frame(Intercept = 1, dwater = prddf2$dwater, elev = prddf2$elev,
                            forest = prddf2$forest, developed = prddf2$developed,
                            openwater = prddf2$openwater, barren = prddf2$barren,
                            shrubland = prddf2$shrubland, herb = prddf2$herb,
                            planted = prddf2$planted, wetland = prddf2$wetland,
                            dist = prddf2$dwater)))
stk.all.prd = inla.stack(stk.abdu,stk.quad,stk.prd)
Next I fit my model
ft.inla.prd <- inla(
  y ~ 0 + Intercept + elev + dwater + forest + developed +
    f(inla.group(dist, n = 50, method = "quantile"), model = "rw1", scale.model = TRUE) +
    f(i, model = sc.spde),
  family = "binomial",
  data = inla.stack.data(stk.all.prd),
  control.predictor = list(A = inla.stack.A(stk.all.prd), compute = TRUE),
  E = inla.stack.data(stk.all.prd)$e,
  control.compute = list(dic = TRUE),
  control.fixed = list(expand.factor.strategy = "INLA"))
Then I change the predicted values from logit to probabilities
ft.inla.prd$newfield <- exp(ft.inla.prd$summary.random$i$mean)/(1 + exp(ft.inla.prd$summary.random$i$mean))
And finally I use inla.mesh.project and levelplot to create my image
xmean <- inla.mesh.project(projgrid,ft.inla.prd$newfield)
levelplot(xmean, col.regions=topo.colors(99), main='Probability of Presence',xlab='', ylab='', scales=list(draw=FALSE))
So my problem is that I now want to export this data (what is projected as the graph) as a raster so that I can work with it in ArcGIS. However, I have not been able to find a way to do so.
Any input is greatly appreciated.
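One direction worth trying, offered as a sketch under assumptions rather than a verified recipe: a projector built with inla.mesh.projector() normally carries the lattice coordinates as projgrid$x and projgrid$y, and xmean is the projected matrix, so the three can be bundled into an image-style list that the raster package accepts and then written to a GeoTIFF for ArcGIS:

library(raster)
# assumes projgrid exposes $x and $y lattice coordinates and xmean is the matrix above
r <- raster(list(x = projgrid$x, y = projgrid$y, z = xmean))
writeRaster(r, filename = "presence_probability.tif", format = "GTiff", overwrite = TRUE)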

Running Bokeh in Jupyter notebook as opposed to Bokeh server

I am trying to run the following example from the Bokeh docs in a Jupyter notebook, as opposed to through the Bokeh server, but I can't figure out what to change for this to work. Right now I simply get a blank output. I feel like it should be easy and I'm just missing something... Here is the code from the Bokeh docs.
import numpy as np
from bokeh.models import BoxSelectTool, LassoSelectTool, Paragraph
from bokeh.plotting import figure, hplot, vplot
# create three normal population samples with different parameters
x1 = np.random.normal(loc=5.0, size=400) * 100
y1 = np.random.normal(loc=10.0, size=400) * 10
x2 = np.random.normal(loc=5.0, size=800) * 50
y2 = np.random.normal(loc=5.0, size=800) * 10
x3 = np.random.normal(loc=55.0, size=200) * 10
y3 = np.random.normal(loc=4.0, size=200) * 10
x = np.concatenate((x1, x2, x3))
y = np.concatenate((y1, y2, y3))
TOOLS="pan,wheel_zoom,box_select,lasso_select"
# create the scatter plot
p = figure(tools=TOOLS, plot_width=600, plot_height=600, title=None, min_border=10, min_border_left=50)
r = p.scatter(x, y, size=3, color="#3A5785", alpha=0.6)
p.select(BoxSelectTool).select_every_mousemove = False
p.select(LassoSelectTool).select_every_mousemove = False
# create the horizontal histogram
hhist, hedges = np.histogram(x, bins=20)
hzeros = np.zeros(len(hedges)-1)
hmax = max(hhist)*1.1
LINE_ARGS = dict(color="#3A5785", line_color=None)
ph = figure(toolbar_location=None, plot_width=p.plot_width, plot_height=200, x_range=p.x_range,
            y_range=(-hmax, hmax), title=None, min_border=10, min_border_left=50)
ph.xgrid.grid_line_color = None
ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hhist, color="white", line_color="#3A5785")
hh1 = ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hzeros, alpha=0.5, **LINE_ARGS)
hh2 = ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hzeros, alpha=0.1, **LINE_ARGS)
# create the vertical histogram
vhist, vedges = np.histogram(y, bins=20)
vzeros = np.zeros(len(vedges)-1)
vmax = max(vhist)*1.1
th = 42 # need to adjust for toolbar height, unfortunately
pv = figure(toolbar_location=None, plot_width=200, plot_height=p.plot_height+th-10, x_range=(-vmax, vmax),
            y_range=p.y_range, title=None, min_border=10, min_border_top=th)
pv.ygrid.grid_line_color = None
pv.xaxis.major_label_orientation = -3.14/2
pv.quad(left=0, bottom=vedges[:-1], top=vedges[1:], right=vhist, color="white", line_color="#3A5785")
vh1 = pv.quad(left=0, bottom=vedges[:-1], top=vedges[1:], right=vzeros, alpha=0.5, **LINE_ARGS)
vh2 = pv.quad(left=0, bottom=vedges[:-1], top=vedges[1:], right=vzeros, alpha=0.1, **LINE_ARGS)
pv.min_border_top = 80
pv.min_border_left = 0
ph.min_border_top = 10
ph.min_border_right = 10
p.min_border_right = 10
layout = vplot(hplot(p, pv), hplot(ph, Paragraph(width=200)), width=800, height=800)
def update(attr, old, new):
    inds = np.array(new['1d']['indices'])
    if len(inds) == 0 or len(inds) == len(x):
        hhist1, hhist2 = hzeros, hzeros
        vhist1, vhist2 = vzeros, vzeros
    else:
        neg_inds = np.ones_like(x, dtype=bool)
        neg_inds[inds] = False
        hhist1, _ = np.histogram(x[inds], bins=hedges)
        vhist1, _ = np.histogram(y[inds], bins=vedges)
        hhist2, _ = np.histogram(x[neg_inds], bins=hedges)
        vhist2, _ = np.histogram(y[neg_inds], bins=vedges)
    hh1.data_source.data["top"] = hhist1
    hh2.data_source.data["top"] = -hhist2
    vh1.data_source.data["right"] = vhist1
    vh2.data_source.data["right"] = -vhist2

r.data_source.on_change('selected', update)
Thanks in advance!
You have to run the output_notebook command first to set notebook operation (https://docs.bokeh.org/en/latest/docs/user_guide/notebook.html), then use the show command.
tl;dr
from bokeh.io import output_notebook, show
output_notebook()
# ... make plots and stuff ...
show(plots)
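As a minimal self-contained illustration of that pattern (not from the original answer, and with hypothetical toy data); note that the Python on_change callback in the posted code only fires under a Bokeh server, so the linked-selection updates will not run in a plain notebook:

from bokeh.io import output_notebook, show
from bokeh.plotting import figure

output_notebook()  # route Bokeh output into the notebook

p = figure(plot_width=300, plot_height=300)
p.circle([1, 2, 3], [4, 5, 6], size=10)
show(p)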
