I want to plot a contour of the PBLH difference between two WRF-Chem simulations. I have the netCDF means (attached files), and I want to draw contours of the 95% significance level, but the script did not work. Can you give your suggestions, please?
"Error: scalar_field: If the input data is 1-dimensional, you must set sfXArray and sfYArray to 1-dimensional arrays of the same length.
warning:create: Bad HLU id passed to create, ignoring it"
I'm expecting a contour plot where grey shaded areas indicate regions with less than 95% significance.
Here is the code. You can test it with any two WRF netCDF files:
`;----------------------------------------------------------------------
; contoursym_1.ncl
;
; Concepts illustrated:
; - Using a symmetric color map
; - Using a blue-red color map
; - Explicitly setting contour levels
;----------------------------------------------------------------------
;
; These files are loaded by default in NCL V6.2.0 and newer
load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/gsn_code.ncl"
load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/gsn_csm.ncl"
load "$NCARG_ROOT/lib/ncarg/nclscripts/wrf/WRF_contributed.ncl"
; This file still has to be loaded manually
load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/shea_util.ncl"
begin
;*****************
;-- load data
;*****************
;specify file names (input&output, netCDF)
pathin = "./" ; directory
fin1 = "15-25-omet.nc" ; input file name #1
fin2 = "15-25-wrfda.nc" ; input file name #2
fout = "signif_pblh_omet-wrfda" ; output file name
foutnc = fout+".nc"
f = addfile ("15-25-omet.nc", "r")
; open input files
in1 = addfile(pathin+fin1,"r")
in2 = addfile(pathin+fin2,"r")
; read data
tmp1 = in1->PBLH
tmp2 = in2->PBLH
x = f->PBLH(0,:,:)
diff = tmp1 - tmp2
copy_VarCoords(tmp1,diff) ; keep coordinates/metadata for plotting
printVarSummary(tmp1)
printVarSummary(tmp2)
;****************************************************
; calculate probabilities
;****************************************************
;t-test
xtmp = tmp1(south_north|:, west_east|:, XTIME|:) ; make XTIME the rightmost dimension
ytmp = tmp2(south_north|:, west_east|:, XTIME|:) ; so dim_avg/dim_variance average over time
aveX = dim_avg_Wrap(xtmp)
aveY = dim_avg_Wrap(ytmp)
varX = dim_variance(xtmp)
varY = dim_variance(ytmp)
sX = dimsizes(xtmp&XTIME)
sY = dimsizes(ytmp&XTIME)
print(sX)
print(sY)
alphat = 100.*(1. - ttest(aveX,varX,sX, aveY,varY,sY, True, False))
;aveX = where(alphat.lt.95., aveX@_FillValue, aveX)
;print(alphat)
;*********************
;---Start the graphics
;**********************
wks = gsn_open_wks("ps" ,"Bias_gray_F") ; ps,pdf,x11,ncgm,eps
res = True
res@gsnMaximize = True ; maximize plot size
res@gsnSpreadColors = True ; use full range of colormap
res@cnFillOn = True ; color plot desired
res@cnLinesOn = False ; turn off contour lines
res@cnLineLabelsOn = False ; turn off contour line labels
res@lbOrientation = "Vertical"
res@lbLabelPosition = "Right" ; label position
res@tiMainFontHeightF = 0.025
res@lbBoxEndCapStyle = "TriangleBothEnds" ; triangle label bar
;************************************************
; Use WRF_contributed procedure to set map resources
;************************************************
res = True
WRF_map_c(f, res, 0) ; reads info from file
;************************************************
; if appropriate, set True for native mapping (faster)
; set False otherwise
;************************************************
res@tfDoNDCOverlay = True
;************************************************
; associate the 2-dimensional coordinates to the variable for plotting
; only if non-native plot
;************************************************
if (.not.res@tfDoNDCOverlay) then
x@lat2d = f->XLAT(0,:,:) ; direct assignment
x@lon2d = f->XLONG(0,:,:)
end if
;************************************************
; Turn on lat / lon labeling
;************************************************
res@pmTickMarkDisplayMode = "Always" ; turn on tickmarks
;res@tmXTOn = False ; turn off top labels
;res@tmYROn = False ; turn off right labels
;************************************************
; Loop over all times if desired (uncomment)
; Demo: one arbitrarily chosen time
;************************************************
dimx = dimsizes(x) ; dimensions of x: (south_north, west_east)
nt = 0 ; arbitrary time index for the difference plot
opt = True ; options for gsn_contour_shade
res1 = True ; options for the significance contour
res1@gsnDraw = False ; drawn later via overlay
res1@gsnFrame = False
res1@cnLinesOn = False
res1@cnLineLabelsOn = False
res1@cnInfoLabelOn = False
res1@cnLevelSelectionMode = "ExplicitLevels"
res1@cnLevels = (/95./) ; a contour level at 95 so the shading has a boundary
res1@tfDoNDCOverlay = res@tfDoNDCOverlay ; match the base (native) projection
res@tiMainString = "OMET-FBDA" ; main title
plot0 = gsn_csm_contour_map(wks,diff(nt,:,:),res) ; PBLH difference
pval = gsn_csm_contour(wks,alphat,res1) ; alphat is 2-D (south_north,west_east); alphat(0,:) is 1-D and triggers the scalar_field error
opt@gsnShadeMid = "gray62"
pval = gsn_contour_shade(pval,0.,95.,opt) ; grey-shade regions with less than 95 % significance (alphat is in percent)
overlay(plot0,pval)
draw(plot0)
frame(wks)
end`
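For reference, the same t-test mask can be cross-checked outside NCL with a minimal Python/xarray sketch. This is only an illustration under an assumption: it presumes the time dimension in the two mean files is named XTIME, as the NCL reordering implies; adjust the name if your files differ.

import xarray as xr
from scipy import stats

omet = xr.open_dataset("15-25-omet.nc")["PBLH"]
wrfda = xr.open_dataset("15-25-wrfda.nc")["PBLH"]

# PBLH difference of the time means (the field to contour)
diff = omet.mean("XTIME") - wrfda.mean("XTIME")

# Welch's t-test over the time samples, same idea as NCL's ttest
tax = omet.get_axis_num("XTIME")
t, p = stats.ttest_ind(omet.values, wrfda.values, axis=tax, equal_var=False)
significance = 100.0 * (1.0 - p)            # percent, like alphat in the NCL script

not_significant = significance < 95.0       # grid cells to shade grey
print(int(not_significant.sum()), "grid cells are below 95 % significance")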
from torchvision_starter.engine import train_one_epoch, evaluate
from torchvision_starter import utils
import multiprocessing
import time
import torch
n_cpu = multiprocessing.cpu_count()
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
_ = model.to(device)
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(params, lr=0.00001)   # optimize the trainable parameters collected above
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                               step_size=3,
                                               gamma=0.2,
                                               verbose=True
                                               )
# Train for num_epochs more epochs (epoch numbering continues from 10)
num_epochs = 1

start = time.time()
for epoch in range(10, 10 + num_epochs):
    # train for one epoch, printing every 10 iterations
    train_one_epoch(model, optimizer, data_loaders['train'], device, epoch, print_freq=10)

    # update the learning rate
    lr_scheduler.step()

    # evaluate on the validation dataset
    evaluate(model, data_loaders['valid'], device=device)
stop = time.time()
print(f"\n\n{num_epochs} epochs in {stop - start} s ({(stop-start) / 3600:.2f} hrs)")
Everything is OK before this part, but after I run it, I get the error below:
I have tried adding drop_last to the DataLoader in helper.py's function, like this:
data_loaders["train"] = torch.utils.data.DataLoader(
train_data,
batch_size=batch_size,
sampler=train_sampler,
num_workers=num_workers,
collate_fn=utils.collate_fn,
drop_last=True
)
But it doesn't work. By the way, torch and torchvision are compatible and CUDA is available.
I wonder how to fix it.
The get_data_loaders function:
def get_data_loaders(
    folder, batch_size: int = 2, valid_size: float = 0.2, num_workers: int = -1, limit: int = -1, thinning: int = None
):
    """
    Create and returns the train, validation and test data loaders.

    :param folder: folder containing the dataset
    :param batch_size: size of the mini-batches
    :param valid_size: fraction of the dataset to use for validation. For example 0.2
                       means that 20% of the dataset will be used for validation
    :param num_workers: number of workers to use in the data loaders. Use -1 to mean
                        "use all my cores"
    :param limit: maximum number of data points to consider
    :param thinning: take every n-th frame, instead of all frames
    :return: a dictionary with 3 keys: 'train', 'valid' and 'test' containing respectively the
             train, validation and test data loaders
    """
    if num_workers == -1:
        # Use all cores
        num_workers = multiprocessing.cpu_count()

    # We will fill this up later
    data_loaders = {"train": None, "valid": None, "test": None}

    # create 3 sets of data transforms: one for the training dataset,
    # containing data augmentation, one for the validation dataset
    # (without data augmentation) and one for the test set (again
    # without augmentation)
    data_transforms = {
        "train": get_transform(UdacitySelfDrivingDataset.mean, UdacitySelfDrivingDataset.std, train=True),
        "valid": get_transform(UdacitySelfDrivingDataset.mean, UdacitySelfDrivingDataset.std, train=False),
        "test": get_transform(UdacitySelfDrivingDataset.mean, UdacitySelfDrivingDataset.std, train=False),
    }

    # Create train and validation datasets
    train_data = UdacitySelfDrivingDataset(
        folder,
        transform=data_transforms["train"],
        train=True,
        thinning=thinning
    )

    # The validation dataset is a split from the training dataset, so we read
    # from the same folder, but we apply the transforms for validation
    valid_data = UdacitySelfDrivingDataset(
        folder,
        transform=data_transforms["valid"],
        train=True,
        thinning=thinning
    )

    # obtain training indices that will be used for validation
    n_tot = len(train_data)
    indices = torch.randperm(n_tot)

    # If requested, limit the number of data points to consider
    if limit > 0:
        indices = indices[:limit]
        n_tot = limit

    split = int(math.ceil(valid_size * n_tot))
    train_idx, valid_idx = indices[split:], indices[:split]

    # define samplers for obtaining training and validation batches
    train_sampler = torch.utils.data.SubsetRandomSampler(train_idx)
    valid_sampler = torch.utils.data.SubsetRandomSampler(valid_idx)

    # prepare data loaders
    data_loaders["train"] = torch.utils.data.DataLoader(
        train_data,
        batch_size=batch_size,
        sampler=train_sampler,
        num_workers=num_workers,
        collate_fn=utils.collate_fn,
        drop_last=True,
    )
    data_loaders["valid"] = torch.utils.data.DataLoader(
        valid_data,
        batch_size=batch_size,
        sampler=valid_sampler,
        num_workers=num_workers,
        collate_fn=utils.collate_fn,
        drop_last=True,
    )

    # Now create the test data loader
    test_data = UdacitySelfDrivingDataset(
        folder,
        transform=data_transforms["test"],
        train=False,
        thinning=thinning
    )

    if limit > 0:
        indices = torch.arange(limit)
        test_sampler = torch.utils.data.SubsetRandomSampler(indices)
    else:
        test_sampler = None

    data_loaders["test"] = torch.utils.data.DataLoader(
        test_data,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        sampler=test_sampler,
        collate_fn=utils.collate_fn,
        drop_last=True,
    )

    return data_loaders
class UdacitySelfDrivingDataset(torch.utils.data.Dataset):

    # Mean and std of the dataset to be used in nn.Normalize
    mean = torch.tensor([0.3680, 0.3788, 0.3892])
    std = torch.tensor([0.2902, 0.3069, 0.3242])

    def __init__(self, root, transform, train=True, thinning=None):
        super().__init__()

        self.root = os.path.abspath(os.path.expandvars(os.path.expanduser(root)))
        self.transform = transform

        # load datasets
        if train:
            self.df = pd.read_csv(os.path.join(self.root, "labels_train.csv"))
        else:
            self.df = pd.read_csv(os.path.join(self.root, "labels_test.csv"))

        # Index by file id (i.e., a sequence of the same length as the number of images)
        codes, uniques = pd.factorize(self.df['frame'])

        if thinning:
            # Take every n-th row. This makes sense because the images are
            # frames of videos from the car, so we are essentially reducing
            # the frame rate
            thinned = uniques[::thinning]
            idx = self.df['frame'].isin(thinned)
            print(f"Keeping {thinned.shape[0]} of {uniques.shape[0]} images")
            print(f"Keeping {idx.sum()} objects out of {self.df.shape[0]}")
            self.df = self.df[idx].reset_index(drop=True)

            # Recompute codes
            codes, uniques = pd.factorize(self.df['frame'])

        self.n_images = len(uniques)
        self.df['image_id'] = codes
        self.df.set_index("image_id", inplace=True)

        self.classes = ['car', 'truck', 'pedestrian', 'bicyclist', 'light']
        self.colors = ['cyan', 'blue', 'red', 'purple', 'orange']

    @property
    def n_classes(self):
        return len(self.classes)

    def __getitem__(self, idx):
        if idx in self.df.index:
            row = self.df.loc[[idx]]
        else:
            raise KeyError(f"Element {idx} not in dataframe")

        # load image from file
        img_path = os.path.join(self.root, "images", row['frame'].iloc[0])
        img = Image.open(img_path).convert("RGB")

        # Exclude bogus boxes with 0 height or width
        h = row['ymax'] - row['ymin']
        w = row['xmax'] - row['xmin']
        filter_idx = (h > 0) & (w > 0)
        row = row[filter_idx]

        # get bounding box coordinates for each mask
        boxes = row[['xmin', 'ymin', 'xmax', 'ymax']].values

        # convert everything into a torch.Tensor
        boxes = torch.as_tensor(boxes, dtype=torch.float32)

        # get the labels
        labels = torch.as_tensor(row['class_id'].values, dtype=int)

        image_id = torch.tensor([idx])
        area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])

        # assume no crowd for everything
        iscrowd = torch.zeros((row.shape[0],), dtype=torch.int64)

        target = {}
        target["boxes"] = boxes
        target["labels"] = labels
        target["image_id"] = image_id
        target["area"] = area
        target["iscrowd"] = iscrowd

        if self.transform is not None:
            img, target = self.transform(img, target)

        return img, target

    def __len__(self):
        return self.n_images

    def plot(self, idx, renormalize=True, predictions=None, threshold=0.5, ax=None):
        image, label_js = self[idx]

        if renormalize:
            # Invert the T.Normalize transform
            unnormalize = T.Compose(
                [
                    T.Normalize(mean=[0., 0., 0.], std=1 / type(self).std),
                    T.Normalize(mean=-type(self).mean, std=[1., 1., 1.])
                ]
            )
            image, label_js = unnormalize(image, label_js)

        if ax is None:
            fig, ax = plt.subplots(figsize=(8, 8))

        _ = ax.imshow(torch.permute(image, [1, 2, 0]))

        for i, box in enumerate(label_js['boxes']):
            xy = (box[0], box[1])
            # boxes are (xmin, ymin, xmax, ymax): width comes from x, height from y
            w, h = (box[2] - box[0]), (box[3] - box[1])
            r = patches.Rectangle(xy, w, h, fill=False, color=self.colors[label_js['labels'][i] - 1], lw=2, alpha=0.5)
            ax.add_patch(r)

        if predictions is not None:
            # Make sure the predictions are on the CPU
            for k in predictions:
                predictions[k] = predictions[k].detach().cpu().numpy()

            for i, box in enumerate(predictions['boxes']):
                if predictions['scores'][i] > threshold:
                    xy = (box[0], box[1])
                    w, h = (box[2] - box[0]), (box[3] - box[1])
                    r = patches.Rectangle(xy, w, h, fill=False, color=self.colors[predictions['labels'][i] - 1], lw=2, linestyle=':')
                    ax.add_patch(r)

        _ = ax.axis("off")

        return ax
I have a netCDF variable named mesh2d_sa1 which contains a coordinates attribute. But when I try to read this attribute via ds.mesh2d_sa1.attrs["coordinates"], it is not found. The following is an extract of the output of ncdump -h xxxxxxx.nc, which confirms that the coordinates attribute exists.
double mesh2d_sa1(time, mesh2d_nFaces, mesh2d_nLayers) ;
mesh2d_sa1:mesh = "mesh2d" ;
mesh2d_sa1:location = "face" ;
mesh2d_sa1:coordinates = "mesh2d_face_x mesh2d_face_y" ;
mesh2d_sa1:cell_methods = "mesh2d_nFaces: mean" ;
mesh2d_sa1:standard_name = "sea_water_salinity" ;
mesh2d_sa1:long_name = "Salinity in flow element" ;
mesh2d_sa1:units = "1e-3" ;
mesh2d_sa1:grid_mapping = "wgs84" ;
mesh2d_sa1:_FillValue = -999. ;
The coordinates attribute can be accessed via ds.mesh2d_sa1.encoding['coordinates']. When xarray opens a file with its default decoding, attributes it consumes during decoding (such as coordinates and _FillValue) are moved from .attrs into .encoding.
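For example, here is a minimal sketch (the file name is the placeholder from the ncdump call above):

import xarray as xr

ds = xr.open_dataset("xxxxxxx.nc")

# With xarray's default decoding, the "coordinates" attribute is consumed
# during decoding and ends up in the variable's encoding, not in .attrs
print(ds.mesh2d_sa1.encoding["coordinates"])      # "mesh2d_face_x mesh2d_face_y"

# To keep it as a plain attribute instead, disable coordinate decoding
ds_raw = xr.open_dataset("xxxxxxx.nc", decode_coords=False)
print(ds_raw.mesh2d_sa1.attrs["coordinates"])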
I want to clip/mask a raster image (500 m resolution) with another raster image (10 km resolution) using IDL. After the clip/mask process, the image should still be at 500 m resolution. I have 365 image pairs and I want to process them with IDL. Can anybody write this code in IDL?
Thanks in advance.
@MrFuppes
Initially, I wrote some IDL code which is able to mask/clip images of the same resolution, because they have the same dimensions.
But I cannot write the mask function for different resolutions, where the problem is the difference in dimensions and resolution.
In my code:
Band 1 (500 m) is my base image, and I mask it with a cloud-mask image (500 m), which works well; then I want to apply a second mask using the AOD_MASK image (10 km resolution).
But I have no idea how to write a MASK function for images with different dimensions.
If you have any suggestions regarding this issue, please let me know.
I have attached my test data, please check it.
Below is my IDL code:
`Pro MASK
COMPILE_OPT idl2
ENVI, /RESTORE_BASE_SAVE_FILES
ENVI_BATCH_INIT, /NO_STATUS_WINDOW
print, ' ' + 'Batch Processing Start'
print, ' ' + systime()
;********************************************************************************
;Input location of different file
InputAOD = 'F:\DB_test_data\VAR2\TEST_DATA\Clip\'
Input_cloud = 'F:\DB_test_data\VAR2\TEST_DATA\Clip\'
Input_TOA_B1 = 'F:\DB_test_data\VAR2\TEST_DATA\Clip\'
;Output location
Output = 'F:\DB_test_data\OUT_PUT\'
; Search for input data
FileCloud = File_Search(Input_cloud + '*Aerosol_Cldmask_Land_Ocean-Aerosol_Cldmask_Land_Ocean.tif', COUNT=Count_in1)
FileAOD = File_Search(InputAOD + '*Corrected_Optical_Depth_Land_2-Corrected_Optical_Depth_Land.tif', COUNT=count_in)
FileBAND1 = File_Search(Input_TOA_B1 + '*pssgrp_000501330352.EV_500_RefSB_1-EV_500_RefSB.tif', COUNT=COUNT)
nfiles = Count_in1 ; number of files found (n_elements(count_in1) is always 1)
;$$$$$$$$$$$$$$$$$$$$$$$$$$$$ $$$$$$$$$$$$$$$$$
FOR k=0, nfiles-1 DO BEGIN
Print, 'Processing File =', k +1
Print, 'Total Files=', count_in1
string_count = strtrim(nfiles,2)
;$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
;cloud(500_METER)
ENVI_OPEN_FILE, FileCloud[k], r_FiD=FiD_cld
IF (FiD_cld EQ -1) then return
ENVI_FILE_QUERY, FiD_cld, dims=dims, nb=nb, ns=cld_ncols, nl=cld_nrows
pos = [0]
map_info = envi_get_map_info(FiD=FiD_cld)
Cloud = Double(ENVI_GET_DATA(FiD=FiD_cld, dims=dims, pos=[0]))
;$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
;BAND_1(500_METER)
envi_open_file,FileBAND1[k],r_fid=fid_T_B1
if (fid_T_B1 eq -1)then return
envi_file_query,fid_T_B1,dims=dims,nb=nb, ns=toa_B1_ncols, nl=toa_B1_nrows
pos = lindgen(nb) ; one entry per band so pos[i] is valid in the band loop
map_info1=envi_get_map_info(fid=fid_T_B1)
layer1 = fltarr(toa_B1_ncols,toa_B1_nrows,nb)
FOR i=0,nb-1 DO BEGIN
layer1[*,*,i]=ENVI_GET_DATA(fid=fid_T_B1,dims=dims,pos=pos[i])
ENDFOR
;To replace 65535 with NAN
i=WHERE(layer1 eq 65535,count)
if (count gt 0)then layer1[i]=!VALUES.F_NAN
TOA_B1=layer1*0.000053811826d
;$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
;AOD(10KM_RESOLUTION)
ENVI_OPEN_FILE, FileAOD[k], r_FiD=FiD_AOD
IF (FiD_AOD EQ -1) then return
ENVI_FILE_QUERY, FiD_AOD, dims=dims, nb=nb, ns=aod_ncols, nl=aod_nrows
pos = lindgen(nb) ; one entry per band so pos[i] is valid in the band loop
map_info = envi_get_map_info(FiD=FiD_AOD)
layer2 = fltarr(aod_ncols,aod_nrows,nb)
FOR i=0,nb-1 DO BEGIN
layer2[*,*,i]=ENVI_GET_DATA(fid=FiD_AOD,dims=dims,pos=pos[i])
ENDFOR
;To replace -9999 with NAN
i=WHERE(layer2 eq -9999,count)
if (count gt 0)then layer2[i]=!VALUES.F_NAN
AOD=layer2*0.0010000000474974513d
;set the specific range for mask
AOD_MASK = 1.0*(AOD GE 0.0 and AOD LE 0.1) + (AOD GT 0.1)*0.0
;$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
; to get same dimensions for B1TOA and CLD data
if (cld_ncols le toa_B1_ncols) then begin
xsize = cld_ncols
endif else begin
xsize = toa_B1_ncols
endelse
if (cld_nrows le toa_B1_nrows) then begin
ysize = cld_nrows
endif else begin
ysize = toa_B1_nrows
endelse
;$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
; to call parameters again with same dims
Cloud = FSC_Resize_Image(Cloud,xsize, ysize)
Band1 = FSC_Resize_Image(TOA_B1,xsize, ysize)
;$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
;First masking processes with cloud mask images(500 meters)
Cloud_masked = Cloud * Band1
;To replace 0 with NAN
i=WHERE(Cloud_masked eq 0,count)
if (count gt 0)then Cloud_masked[i]=!VALUES.F_NAN
Cloud_masked_Band1=Cloud_masked
;Second masking processes with AOD_MASK images(10KM)
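;-- Sketch (untested assumption, not part of the original code): resample the
;-- 10 km AOD_MASK onto the 500 m grid with nearest-neighbour FSC_Resize_Image
;-- and multiply, exactly as was done for the cloud mask above. This assumes
;-- both rasters cover the same geographic extent and the AOD mask is single band.
AOD_MASK_500 = FSC_Resize_Image(REFORM(AOD_MASK[*,*,0]), xsize, ysize)
AOD_masked_Band1 = Cloud_masked_Band1 * AOD_MASK_500
;To replace 0 with NAN
i=WHERE(AOD_masked_Band1 eq 0,count)
if (count gt 0)then AOD_masked_Band1[i]=!VALUES.F_NAN
; If this second mask should appear in the output, write AOD_masked_Band1
; instead of Cloud_masked below.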
;$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
; Define output FileName
FileName = STRJOIN(STRSPLIT(STRMID(FILE_BASENAME(FileBAND1[k], '.tif'), 0, 75), '.', /EXTRACT), '_')
print, FileName
; write output File
out_name_out = Output + FileName +'_masked_image.tif'
map_info = envi_get_map_info(FiD=fid_T_B1)
ENVI_WRITE_ENVI_FILE, Cloud_masked, out_name=out_name_out, map_info=map_info
;FileName = STRJOIN(STRSPLIT(STRMID(FILE_BASENAME(FileAOD[k], '.tif'), 0, 75), '.', /EXTRACT), '_')
;print, FileName
; write output File
;out_name_out = Output + FileName +'_AOD.tif'
;map_info = envi_get_map_info(FiD=FiD_AOD)
;ENVI_WRITE_ENVI_FILE, AOD,out_name=out_name_out, map_info=map_info
;$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
Print, FileName + ' ' + strtrim(k+1,2) + ' of ' + string_count + ' Processed'
Endfor
Print, ' ' + systime()
Print, ' ' + 'Batch Processing End'
End
;$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
FUNCTION FSC_Resize_Image, image, cols, rows, $
INTERPOLATE=interp, $
MINUS_ONE=minus_one
Compile_Opt idl2
; Check parameters.
IF N_Params() EQ 0 THEN BEGIN
Print, 'USE SYNTAX: FSC_Resize_Image, image, cols, rows'
RETURN, "FSC_Resize_Image Syntax Error"
ENDIF
; Return to the caller on an error.
On_Error, 2
; Check image parameter for size. Only 2D and 3D images allowed.
ndim = SIZE(image, /N_DIMENSIONS)
dims = SIZE(image, /DIMENSIONS)
IF ((ndim LT 2) OR (ndim GT 3)) THEN $
Message, 'Input image must have two or three dimensions.'
; Check for keywords. Default for minus_one is 0.
interp = Keyword_Set(interp)
IF N_Elements(minus_one) EQ 0 THEN minus_one = 0
m1 = Keyword_Set(minus_one)
; 2D images are immediately passed to IDL's Congrid command, with
; the CENTER keyword set.
IF ndim EQ 2 THEN BEGIN
RETURN, Congrid(image, cols, rows, CENTER=1, MINUS_ONE=minus_one, $
INTERP=interp)
ENDIF
; 24-bit images are handled differently. The "3" dimension of a 24-bit
; image should not be interpolated.
offset = 0.5 ; To center the pixel location (i.e., CENTER=1 for Congrid)
index3 = Where(dims EQ 3)
CASE index3 OF
0: BEGIN
srx = [0,1,2]
sry = Float(dims[1] - m1) / (cols - m1) *(Findgen(cols) + offset) - offset
srz = Float(dims[2] - m1) / (rows - m1) *(Findgen(rows) + offset) - offset
END
1: BEGIN
srx = Float(dims[0] - m1) / (cols - m1) *(Findgen(cols) + offset) - offset
sry = [0,1,2]
srz = Float(dims[2] - m1) / (rows - m1) *(Findgen(rows) + offset) - offset
END
2: BEGIN
srx = Float(dims[0] - m1) / (cols - m1) *(Findgen(cols) + offset) - offset
sry = Float(dims[1] - m1) / (rows - m1) *(Findgen(rows) + offset) - offset
srz = [0,1,2]
END
ENDCASE
; Do the interpolation. Preserve nearest neighbor sampling, if required.
IF interp THEN BEGIN
RETURN, Interpolate(image, srx, sry, srz, /GRID)
ENDIF ELSE BEGIN
RETURN, Interpolate(image, Round(srx), Round(sry), Round(srz), /GRID)
ENDELSE
END`
I have some data that I want to plot with gnuplot, but I have many y values for the same x value. I will show you so you can understand:
0 0.650765 0.122225 0.013325
0 0.522575 0.001447 0.010718
0 0.576791 0.004277 0.104052
0 0.512327 0.002268 0.005430
0 0.530401 0.000000 0.036541
0 0.518333 0.001128 0.017270
20 0.512864 0.001111 0.005433
20 0.510357 0.005312 0.000000
20 0.526809 0.001089 0.033523
20 0.527076 0.000000 0.034215
20 0.507166 0.001131 0.000000
20 0.513868 0.001306 0.004344
40 0.531742 0.003295 0.0365
In this example, I have 6 values for each x value. So how can I draw the average and the confidence bar (interval)?
Thanks for the help.
To do this, you will need some kind of external processing. One possibility would be to use gawk to calculate the required quantities and then feed this auxiliary output to Gnuplot to plot it. For example:
set terminal png enhanced
set output 'test.png'
fName = 'data.dat'
plotCmd(col_num)=sprintf('< gawk -f analyze.awk -v col_num=%d %s', col_num, fName)
set format y '%0.2f'
set xr [-5:25]
plot \
plotCmd(2) u 1:2:3:4 w yerrorbars pt 3 lc rgb 'dark-red' t 'column 2'
This assumes that the script analyze.awk resides in the same directory from which Gnuplot is launched (otherwise, it would be necessary to modify the path in the -f option of gawk). The script analyze.awk itself reads:
function analyze(x, data) {
    n = 0; mean = 0;
    val_min = 0; val_max = 0;

    for (val in data) {
        n += 1;
        delta = val - mean;
        mean += delta / n;
        val_min = (n == 1) ? val : ((val < val_min) ? val : val_min);
        val_max = (n == 1) ? val : ((val > val_max) ? val : val_max);
    }

    if (n > 0) {
        print x, mean, val_min, val_max;
    }
}

{
    curr = $1;
    yval = $(col_num);

    if (NR == 1 || prev != curr) {
        analyze(prev, data);
        delete data;
        prev = curr;
    }

    data[yval] = 1;
}

END {
    analyze(curr, data);
}
It directly implements the online algorithm to calculate the mean and for each distinct value of x prints this mean as well as the min/max values.
In the Gnuplot script, the column of interest is then passed to the plotCmd function which prepares the command to be executed and the output of which will be plotted with u 1:2:3:4 w yerrorbars. This syntax means that the confidence interval is stored in the 3rd/4th columns while the value itself (the mean) resides in the second column.
In total, the two scripts above produce the picture below. The confidence interval on the last point is not visible since the example data in your question contain only one record for x=40, thus the min/max values coincide with the mean.
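If gawk is not available, the same preprocessing step (mean plus min/max for every distinct x) can be sketched in Python instead; this is only an equivalent of the awk script under the same assumptions (whitespace-separated data, x in column 1, the y column passed as a 1-based number on the command line):

#!/usr/bin/env python3
# analyze.py -- print "x mean min max" for every distinct x value (column 1)
# usage from Gnuplot:  plot '< python3 analyze.py data.dat 2' u 1:2:3:4 w yerrorbars
import sys
from collections import defaultdict

fname, col = sys.argv[1], int(sys.argv[2]) - 1   # convert the 1-based column to a 0-based index
groups = defaultdict(list)

with open(fname) as fh:
    for line in fh:
        parts = line.split()
        if not parts:
            continue
        groups[float(parts[0])].append(float(parts[col]))

for x in sorted(groups):
    ys = groups[x]
    print(x, sum(ys) / len(ys), min(ys), max(ys))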
You can easily plot the average of the columns in this case:
plot "myfile.dat" using ($1):(($2 + $3 + $4)/3)
If you want the average of only the second and fourth columns, for example, you can write (($2+$4)/2), and so on.
Trying to mosaic an image with RMagick.
How would one "mosaic blur" an existing image, making the picture that it represents mosaicked?
Like:
This is how you do a mosaic using RMagick:
#!/home/software/ruby-1.8.5/bin/ruby -w
require 'RMagick'
# Demonstrate the mosaic method
a = Magick::ImageList.new
letter = 'A'
26.times do
  # 'M' is not the same size as the other letters.
  if letter != 'M'
    a.read("images/Button_" + letter + ".gif")
  end
  letter.succ!
end

# Make a copy of "a" with all the images quarter-sized
b = Magick::ImageList.new
page = Magick::Rectangle.new(0, 0, 0, 0)
a.scene = 0
5.times do |i|
  5.times do |j|
    b << a.scale(0.25)
    page.x = j * b.columns
    page.y = i * b.rows
    b.page = page
    (a.scene += 1) rescue a.scene = 0
  end
end
# Make a 5x5 mosaic
mosaic = b.mosaic
mosaic.write("mosaic.gif")
# mosaic.display
exit