Google Earth Engine - Buffer around Cloud Mask Sentinel2 - google-earth-engine

I'm trying to create a cloud free S2 Image. Now this code works, but I want to add a buffer of 20 or 30 meters around the cloud mask, because there still is the edge of the clouds in my image. [![Example image][1]][1]
/**
 * Function to mask clouds using the Sentinel-2 QA band
 * @param {ee.Image} image Sentinel-2 image
 * @return {ee.Image} cloud masked Sentinel-2 image
 */
function maskS2clouds(image, bufferMeters) {
  // Buffer (in meters) by which to grow the cloud mask so cloud *edges* are
  // masked as well — the defect described above. Defaults to 20 m; pass 0 to
  // disable. Callers using .map(maskS2clouds) keep working and get the default.
  var buffer = (bufferMeters === undefined) ? 20 : bufferMeters;
  var qa = image.select('QA60');
  // Bits 10 and 11 are clouds and cirrus, respectively.
  var cloudBitMask = 1 << 10;
  var cirrusBitMask = 1 << 11;
  // Both flags should be set to zero, indicating clear conditions.
  var mask = qa.bitwiseAnd(cloudBitMask).eq(0)
      .and(qa.bitwiseAnd(cirrusBitMask).eq(0));
  if (buffer > 0) {
    // Eroding the clear-sky mask dilates the cloud mask, i.e. it also masks
    // a `buffer`-meter fringe around every detected cloud.
    mask = mask.focal_min(buffer, 'circle', 'meters');
  }
  return image.updateMask(mask).divide(10000);
}
// Map the mask function over one month of data (July 2019) and take the median.
// Load Sentinel-2 surface reflectance data.
var dataset = ee.ImageCollection('COPERNICUS/S2_SR')
    .filterDate('2019-07-01', '2019-07-30')
    // Pre-filter to get less cloudy granules.
    .filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', 9))
    .map(maskS2clouds);
var medianpixels = dataset.median();
var medianclipped = medianpixels.clip(geometry);
// Natural-colour display: reflectance stretched to 0-0.3, B4/B3/B2 as RGB.
// (A stray Markdown code fence that ended the original snippet was removed.)
Map.addLayer(medianclipped, {
  min: 0.0,
  max: 0.3,
  gamma: 1.7,
  bands: ['B4', 'B3', 'B2'],
});
[1]: https://i.stack.imgur.com/HruSG.png

Related

Error: .select(...).sampleRegions is not a function - How to solve it?

I am trying to perform a supervised land cover classification from Sentinel SR images and get the following error:
SR_2018.select(...).sampleRegions is not a function
//import shapefile of study area
// NOTE(review): `boundary` is rebound from an imported asset of the same name.
var boundary =ee.FeatureCollection(boundary);
// Centre the map view (lon 43.4, lat 5.5, zoom 10).
Map.setCenter(43.4,5.5, 10)
/**
 * Function to mask clouds using the Sentinel-2 QA band
 * @param {ee.Image} image Sentinel-2 image
 * @return {ee.Image} cloud masked Sentinel-2 image
*/
function maskS2clouds(image) {
  // QA60 bits 10 (opaque cloud) and 11 (cirrus) must both be unset for a
  // pixel to count as clear; masked pixels are dropped and reflectance is
  // rescaled from the stored 0-10000 range to 0-1.
  var qa = image.select('QA60');
  var noCloud = qa.bitwiseAnd(1 << 10).eq(0);
  var noCirrus = qa.bitwiseAnd(1 << 11).eq(0);
  return image.updateMask(noCloud.and(noCirrus)).divide(10000);
}
//add an NDVI (Normalized Difference Vegetation Index) band to the images
// Append an NDVI band, computed as the normalized difference of B8 (NIR)
// and B4 (red), to a Sentinel-2 image.
var NDVI = function (image) {
  var ndviBand = image.normalizedDifference(['B8', 'B4']).rename('NDVI');
  return image.addBands(ndviBand);
};
// Sentinel-2 SR scenes over the study area, clipped and with an NDVI band.
var dataset_SR = ee.ImageCollection('COPERNICUS/S2_SR')
    .filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', 20))
    .filterBounds(ee.FeatureCollection(boundary))
    .map(function(image) { return image.clip(boundary); })
    .map(NDVI);
print(dataset_SR);
// Create yearly median composites from the available Sentinel SR data.
// Year 2018 (December only).
// NOTE(review): NDVI was already added when building dataset_SR; mapping
// NDVI again adds a duplicate band — consider dropping one of the two calls.
var SR_2018 = dataset_SR
    .filterDate('2018-12-01', '2018-12-31')
    .map(maskS2clouds)
    .map(NDVI);
// Reducing with ee.Reducer.median() renames every band to '<band>_median'.
var SR_2018_median = SR_2018.reduce(ee.Reducer.median());
print(SR_2018_median);
// Load the training data for the supervised classification.
var trainingSites = urban.merge(water).merge(veg_high).merge(soil).merge(veg_med);
// Choose all the median bands available, including the median NDVI band.
// Use only these bands for the prediction.
var SR_bands = ['B2_median', 'B3_median', 'B4_median', 'B5_median', 'B6_median',
    'B7_median', 'B8_median', 'B8A_median', 'B11_median', 'B12_median',
    'TCI_R_median', 'TCI_G_median', 'TCI_B_median',
    'NDVI_median'];
// Sample the composite *image*, not the collection: sampleRegions() is an
// ee.Image method, so calling it on the ImageCollection SR_2018 raised
// "SR_2018.select(...).sampleRegions is not a function".
var trainingData = SR_2018_median.select(SR_bands).sampleRegions({
  collection: trainingSites, // Get the sample from the polygons FeatureCollection.
  properties: ['landcover'], // Keep this list of properties from the polygons.
  scale: 10
});
// Get a randomForest classifier and train it with the training data.
// NOTE(review): ee.Classifier.randomForest is deprecated in current Earth
// Engine; ee.Classifier.smileRandomForest(10) is the modern equivalent.
var classifier_Train = ee.Classifier.randomForest(10).train({
  features: trainingData,
  classProperty: 'landcover',
  inputProperties: SR_bands // bands
});
I tried using .toBands() as suggested in this post https://stackoverflow.com/questions/63984413/image-selectbands-sampleregions-is-not-a-function-error-what-must-i-do
But it did not solve the problem.

How to display individual images by each date in google earth engine?

I am new to google earth engine and not so familiar with javascript. I want to display the cleared images (B4, B3, B2 bands) of Sentinel-2 by each date in layers (each layer represents one date). The code is shown below, but I always get the error 'no Band 4, constant band'. Can anyone help me to solve this problem? Thanks!
// Geometry of the imported lakes table; centre the map on it at zoom 15.
var lakes=table.geometry();
Map.centerObject(lakes, 15);
function maskS2clouds(image) {
  // Keep only pixels whose QA60 cloud flag (bit 10) and cirrus flag (bit 11)
  // are both zero, then rescale reflectance from 0-10000 to 0-1.
  var qa60 = image.select('QA60');
  var clearSky = qa60.bitwiseAnd(1 << 10).eq(0)
      .and(qa60.bitwiseAnd(1 << 11).eq(0));
  return image.updateMask(clearSky).divide(10000);
}
// Date window for the collection.
var start = ee.Date('2015-06-20');
var finish = ee.Date('2018-06-01');
// Sentinel-2 TOA scenes over the lakes, low-cloud granules only, cloud-masked.
var collection = ee.ImageCollection('COPERNICUS/S2')
.filterDate(start, finish)
.filterBounds(lakes)
.filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', 10))
.map(maskS2clouds);
// Natural-colour visualization: reflectance stretched to 0-0.3.
var rgbVis = {
min: 0.0,
max: 0.3,
bands: ['B4', 'B3', 'B2'],
};
// Add one map layer per image in the collection, named by its id.
function addImage(imageL) { // display each image in collection
var id = imageL.id;
// NOTE(review): ee.Image(imageL.id) reconstructs the image from its id, so
// the masking/scaling applied by maskS2clouds is lost; the raw 0-10000
// values will not match rgbVis's 0-0.3 stretch. This is likely related to
// the reported 'no Band 4, constant band' error — confirm the id resolves
// to a full scene asset.
var image = ee.Image(imageL.id);
Map.addLayer(image.select(['B4','B3','B2']).clip(lakes),rgbVis,id)
}
// evaluate() pulls the collection client-side so plain Array.map can be used.
collection.evaluate(function(collection) { // use map on client-side
print(collection.features);
collection.features.map(addImage);
})

how to project curve to a planar in vtk

I have used the vtkDijkstraGraphGeodesicPath class(example: http://www.cmake.org/Wiki/VTK/Examples/Cxx/PolyData/DijkstraGraphGeodesicPath) to find the shortest path between two points on a mesh, and my next step is to project the path(curve) to a planar. Is there a class or function in vtk to project curve to planar?
Alternatively, I could sample the path (curve) and then project the sampled points onto the plane — so how do I sample the curve and get the sampled points? Thank you in advance!
I never found a method doing the projection of 3D mesh, but I had to use it, and I selected texturizations methods allowing to project a mesh (to texzturize) on a Plane/Cylinder/Sphere.
The main method used in this case is vtkTextureMapToPlane.
// your mesh
vtkSmartPointer<vtkPolyData> mainMesh = myFilter->GetOutput ();
// extract your path from the poly data
// retrieve selected ids from dijkstra algo
vtkSmartPointer<vtkIdList> idList = dijkstra->GetIdList ();
vtkSmartPointer<vtkIdTypeArray> ids = convertToIdTypeArr (idList); // custom method to convert id array
// Once the ID selection is done, the extraction is invoked:
// select the path's points on the input mesh by index.
vtkSmartPointer<vtkSelectionNode> selectionNode = vtkSelectionNode::New ();
selectionNode->SetFieldType (vtkSelectionNode::POINT);
selectionNode->SetContentType (vtkSelectionNode::INDICES);
selectionNode->SetSelectionList (ids);
vtkSmartPointer<vtkSelection> selection = vtkSelection::New ();
selection->AddNode (selectionNode);
vtkSmartPointer<vtkExtractSelection> extract = vtkExtractSelection::New ();
// NOTE(review): 'pl' is not defined in this snippet — presumably the input
// poly data (mainMesh above); confirm before reuse.
extract->SetInputData (0, pl);
extract->SetInputData (1, selection);
// convert result to polydata
vtkSmartPointer<vtkGeometryFilter> geoFilter = vtkGeometryFilter::New ();
geoFilter->SetInputConnection (extract->GetOutputPort());
geoFilter->Update();
vtkSmartPointer<vtkPolyData> selected = geoFilter->GetOutput();
You have a vtkpolyData with vertices from the path. You need to create the plane and project
// plane is sized with 800x600, on y-z directions
double orig[3] = {0, 0, 0};
double pt1[3] = {0, 600, 0};
double pt2[3] = {0, 0, 800};
// create a TextureMapToPlane instance and project the extracted path onto the plane
vtkSmartPointer<vtkTextureMapToPlane> planeMapper = vtkTextureMapToPlane::New ();
planeMapper->SetOrigin(orig);
planeMapper->SetPoint1(pt1);
planeMapper->SetPoint2(pt2);
planeMapper->SetInputData (selected);
planeMapper->Update (); // project
vtkSmartPointer<vtkPolyData> d = planeMapper->GetPolyDataOutput(); // retrieve result
As this algorithm is used for texturization, you need to retrieve Texture coords, and convert them into the plane coordinates. (Text coords are defined in [0, 1] ratio of the height and width)
// Texture coordinates are normalised to [0, 1]; scale by the plane's
// dimensions (800 x 600 here) to recover planar coordinates.
vtkSmartPointer<vtkDataArray> textCoord = d->GetPointData()->GetTCoords ();
vtkSmartPointer <vtkPoints> textPoints = vtkPoints::New ();
for (int i = 0; i < textCoord->GetNumberOfTuples (); ++i)
{
textPoints->InsertNextPoint (textCoord->GetTuple2(i)[0] * 800,
textCoord->GetTuple2(i)[1] * 600, 0);
}
textPoints got here all coordinates in 2 dimension of the projection of your path on the plane. /!\ This coords depends on your plane coordinates.

Draw (inverted) geometric circle in google maps api v3

I'm looking for a way to draw an inverted geometric (not geographical) circle in google maps api v3.
Essentially the goal is to dim the map except around a map object - as a way to make the map object stand out. To do this, I have employed an inverted overlay and have a method to create the circle "hole" in my "shadow-overlay".
However the method I've employed to get the lat/lng coordinates to generate this circle is adjusted to the Mercator projection and is not a consistent size or shape because it is relative to its distance from the equator. The method needs to create a circle (without using google's circle object - or using it with a way to extract its path) that will calculate the lat/lng points from a center, based on a radius field that doesn't take the Mercator projection into account - such that it will display a perfect circle anywhere it is drawn on the map.
It shouldn't be hard, but I'm struggling to convert this function to NOT apply the Mercator projection to the result:
/**
 * Approximate a circle around `point` as a closed ring of lat/lng vertices.
 * @param {google.maps.LatLng} point circle centre
 * @param {number} radius circle radius (km, per the conversion factors below — TODO confirm)
 * @return {Array} 91 LatLng vertices; first and last coincide (closed ring)
 */
function getCircleCoords(point, radius) {
  var d2r = Math.PI / 180; // degrees to radians
  var points = 90;         // number of vertices around the circle
  var circleLatLngs = new Array();
  // Radius in degrees of latitude (km -> miles -> degrees; 1 deg ≈ 69 mi).
  var circleLat = radius * 0.621371192 * 0.014483;
  // Widen the longitudinal radius so the circle spans equal ground distance
  // east-west as north-south at this latitude.
  var circleLng = circleLat / Math.cos(point.lat() * d2r);
  for (var i = 0; i < points + 1; i++) {
    var theta = Math.PI * (i / (points / 2));
    // The duplicated assignments using (circleLat * theta) overwrote these
    // sin/cos vertices and produced a spiral instead of a circle; removed.
    var vertexLat = point.lat() + (circleLat * Math.sin(theta));
    var vertexLng = point.lng() + (circleLng * Math.cos(theta));
    circleLatLngs.push(new google.maps.LatLng(vertexLat, vertexLng));
  }
  return circleLatLngs;
}
This would then get called like:
// Combine the outer shadow path with the circle "hole" as two polygon paths;
// opposite winding order makes the inner ring read as a hole.
feature = new google.maps.Polygon({
paths: [[my_shadow_layer_path],[getCircleCoords(latLng_, 800)] ],
fillColor: '#ff0000',
fillOpacity: 0.5,
map: map_
});
// NOTE(review): stray closing brace from a surrounding function not shown here.
}
Thoughts?
To use a Polygon, you need to calculate the circle coordinates in pixel (world) coordinates and then translate them to geographic coordinates using fromPointToLatLng to get the appropriate coordinates for the circle. Or use an overlay of the correct size and handle zoom changes yourself.
You can try this library https://github.com/raihan2006i/google-map-inverted-circle. which works on Google Map V3. See below for example code blocks
// Page-level handles shared by the map and its inverted-circle overlay.
var iCircle;
var map;
// Build the map and overlay an editable InvertedCircle centred on it.
function initialize() {
var mapDiv = document.getElementById('gMap');
map = new google.maps.Map(mapDiv, {
center: new google.maps.LatLng(37.4419, -122.1419),
zoom: 13,
mapTypeId: google.maps.MapTypeId.ROADMAP
});
iCircle = new InvertedCircle({
center: map.getCenter(),
map: map,
radius: 5000, // 5 km
editable: true,
stroke_weight: 5,
fill_opacity: 0.5,
fill_color: "#ff0000"
});
}
// Initialise once the page has loaded.
google.maps.event.addDomListener(window, 'load', initialize);

mapping rect in small image to larger image (in order to do a copyPixels operation)

this is (I think) a relatively simple math question but I've spent a day banging my head against it and have only the dents and no solution...
I'm coding in actionscript 3 - the functionality is:
large image loaded at runtime. The bitmapData is stored and a smaller version is created to display on the available screen area (I may end up just scaling the large image since it is in memory anyway).
The user can create a rectangle hotspot on the smaller image (the functionality will be more complex: multiple rects with transparency: example a donut shape with hole, etc)
When the user clicks on the hotspot, the rect of the hotspot is mapped to the larger image and a new bitmap "callout" is created, using the larger bitmap data. The reason for this is that the "callout" will be better quality than just scaling up the area of the hotspot.
The image below shows where I am at so far- the blue rect is the clicked hotspot. In the upper left is the "callout" - copied from the larger image. I have the aspect ratio right but I am not mapping to the larger image correctly.
Ugly code below... Sorry this post is so long - I just figured I ought to provide as much info as possible. Thanks for any tips!
--trace of my data values
*source BitmapDada 1152 864
scaled to rect 800 600
scaled BitmapData 800 600
selection BitmapData 58 56
scaled selection 83 80
ratio 1.44
before (x=544, y=237, w=58, h=56)
(x=544, y=237, w=225.04, h=217.28)
*
Image here: http://i795.photobucket.com/albums/yy237/skinnyTOD/exampleST.jpg
// Expand a "callout": when the click lands on an opaque mask pixel, copy the
// corresponding region of the full-resolution source bitmap and place it on
// screen. (First, non-working draft — the trace output is quoted above.)
public function onExpandCallout(event:MouseEvent):void{
// Only react to clicks inside the hotspot mask.
if (maskBitmapData.getPixel32(event.localX, event.localY) != 0){
var maskClone:BitmapData = maskBitmapData.clone();
//amount to scale callout - this will vary/can be changed by user
var scale:Number =150 //scale percentage
var normalizedScale :Number = scale/=100;
var w:Number = maskBitmapData.width*normalizedScale;
var h:Number = maskBitmapData.height*normalizedScale;
// Ratio between the full-size source and the scaled-down display rect.
var ratio:Number = (sourceBD.width /targetRect.width);
//create bmpd of the scaled size to copy source into
var scaledBitmapData:BitmapData = new BitmapData(maskBitmapData.width * ratio, maskBitmapData.height * ratio, true, 0xFFFFFFFF);
trace("source BitmapDada " + sourceBD.width, sourceBD.height);
trace("scaled to rect " + targetRect.width, targetRect.height);
trace("scaled BitmapData", bkgnImageSprite.width, bkgnImageSprite.height);
trace("selection BitmapData", maskBitmapData.width, maskBitmapData.height);
trace("scaled selection", scaledBitmapData.width, scaledBitmapData.height);
trace("ratio", ratio);
var scaledBitmap:Bitmap = new Bitmap(scaledBitmapData);
var scaleW:Number = sourceBD.width / scaledBitmapData.width;
var scaleH:Number = sourceBD.height / scaledBitmapData.height;
var scaleMatrix:Matrix = new Matrix();
scaleMatrix.scale(ratio,ratio);
// Hotspot bounds in display-image coordinates.
var sRect:Rectangle = maskSprite.getBounds(bkgnImageSprite);
var sR:Rectangle = sRect.clone();
// Draw a blue outline around the clicked hotspot for debugging.
var ss:Sprite = new Sprite();
ss.graphics.lineStyle(8, 0x0000FF);
//ss.graphics.beginFill(0x000000, 1);
ss.graphics.drawRect(sRect.x, sRect.y, sRect.width, sRect.height);
//ss.graphics.endFill();
this.addChild(ss);
trace("before " + sRect);
w = uint(sRect.width * scaleW);
h = uint(sRect.height * scaleH);
// NOTE(review): inflating/offsetting by the scaled mask size looks wrong —
// the selection rect should be multiplied by `ratio` into source
// coordinates, as the revised version later in this post does; confirm.
sRect.inflate(maskBitmapData.width * ratio, maskBitmapData.height * ratio);
sRect.offset(maskBitmapData.width * ratio, maskBitmapData.height * ratio);
trace(sRect);
scaledBitmapData.copyPixels(sourceBD, sRect, new Point());
addChild(scaledBitmap);
scaledBitmap.x = offsetPt.x;
scaledBitmap.y = offsetPt.y;
}
}
Thanks!
// Working version: map the hotspot bounds into full-resolution source
// coordinates, copy those pixels through a scaled alpha mask, add a drop
// shadow, and show the result as a draggable sprite.
public function onExpandCallout(event:MouseEvent):void{
// TODO: build this on startup or only on click? Speed vs memory
if (calloutState == true) return;
// Only react to clicks on opaque mask pixels.
if (maskBitmapData.getPixel32(event.localX, event.localY) != 0){
calloutState = true;
//create bitmap from source using scaled selection rect
var ratio:Number = (sourceBMD.width /targetRect.width);
var sRect:Rectangle = hotSpotSprite.getBounds(bkgnImageSprite);
// Scale the display-space rect by `ratio` to address the full-size source.
var destRect:Rectangle = new Rectangle(sRect.x * ratio, sRect.y * ratio, sRect.width * ratio, sRect.height * ratio);
calloutBitmapData = new BitmapData(destRect.width, destRect.height, true, 0xFFFFFFFF);
calloutBitmap = new Bitmap(calloutBitmapData);
//-- scale alpha mask up to the destination size
var scaledMaskBitmapData:BitmapData = new BitmapData(destRect.width, destRect.height, true, 0x00000000);
var maskScale:Number = scaledMaskBitmapData.width / maskBitmapData.width;
var mMatrix:Matrix = new Matrix(maskScale, 0, 0, maskScale);
scaledMaskBitmapData.draw(maskBitmapData,mMatrix,null,null,null, false);
// copy source with scaled alpha
calloutBitmapData.copyPixels(sourceBMD, destRect, new Point(), scaledMaskBitmapData, new Point());
scaledMaskBitmapData = null;
// apply a drop-shadow filter to the callout bitmap
var myDropShadowFilter:DropShadowFilter = new DropShadowFilter();
myDropShadowFilter.distance = 12;
myDropShadowFilter.alpha = .3
myDropShadowFilter.strength = 1;
myDropShadowFilter.blurX = 8;
myDropShadowFilter.blurY = 8;
calloutBitmap.filters = [myDropShadowFilter];
//place on screen
calloutSprite = new Sprite();
calloutSprite.addChild(calloutBitmap)
calloutSprite.x = offsetPt.x;
calloutSprite.y = offsetPt.y;
// ADD TO PARENT DisplayContainer
calloutLayer.addChild(calloutSprite);
// calloutSprite.scaleX = 2;
// calloutSprite.scaleY = 2;
// Double-click collapses; press/release supports dragging the callout.
calloutSprite.doubleClickEnabled = true;
calloutSprite.addEventListener(MouseEvent.DOUBLE_CLICK, onCollapseCallout);
calloutSprite.addEventListener(MouseEvent.MOUSE_DOWN, onStartDrag);
calloutSprite.addEventListener(MouseEvent.MOUSE_UP, onStopDrag);
}
}

Resources