Are recursive modules even a possibility in Terraform?
I want to create a module called my_module with the below content
# Question's module code — "my_module" tries to include itself, which
# Terraform does not support: module sources are expanded statically at
# init time, so the self-reference below never terminates.
variable "name" {
# NOTE(review): quoted type constraints ("string") are legacy pre-0.12
# syntax; modern Terraform expects the bare keyword `string` — confirm version.
type = "string"
}
variable "children" {
# NOTE(review): list(object) looks incomplete — object() needs an attribute
# schema, e.g. list(object({ name = string })) — confirm intent.
type = list(object)
}
resource "XXX" "example_resource" {
name = var.name
}
# Self-referencing module call: `terraform init` resolves module sources
# before any expressions (count, var.children, ...) are evaluated, so it
# keeps re-installing this same source until the cache path exceeds the OS
# filename limit — matching the "file name too long" error shown below.
module "example_recursion" {
source = "../my_module"
count = length(var.children)
name = var.children[count.index].name
}
When I do init it seems to get itself in a twist and ends up corrupting the module cache!
Initializing modules...
- name in ../modules/my_module
- name.example_recursion in ../modules/my_module
- name.example_recursion.example_recursion in ../modules/my_module
- name.example_recursion.example_recursion.example_recursion in ../modules/my_module
- name.example_recursion.example_recursion.example_recursion.example_recursion in ../modules/my_module
- name.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion in ../modules/my_module
- name.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion in ../modules/my_module
- name.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion in ../modules/my_module
- name.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion in ../modules/my_module
- name.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion in ../modules/my_module
- name.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion in ../modules/my_module
- name.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion in ../modules/my_module
Error: Failed to remove local module cache
│
│ Terraform tried to remove
│ .terraform/modules/name.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion
│ in order to reinstall this module, but encountered an error: unlinkat
│ .terraform/modules/name.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion.example_recursion:
│ file name too long
Related
I have the following project setup:
configs/
├── default.yaml
└── trainings
├── data_config
│ └── default.yaml
├── simple.yaml
└── schema.yaml
The content of the files are as follows:
app.py:
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from omegaconf import MISSING, DictConfig, OmegaConf
import hydra
from hydra.core.config_store import ConfigStore

# Config directories, resolved relative to this file.
CONFIGS_DIR_PATH = Path(__file__).parent / "configs"
TRAININGS_DIR_PATH = CONFIGS_DIR_PATH / "trainings"


class Sampling(Enum):
    """Resampling strategy selected by the `sampling` config key."""
    UPSAMPLING = 1
    DOWNSAMPLING = 2


# NOTE: the "@" of these decorators was garbled to "#" in the original paste;
# without @dataclass the schema classes are not valid structured configs.
@dataclass
class DataConfig:
    # MISSING forces a concrete YAML config to supply the value.
    sampling: Sampling = MISSING


@dataclass
class TrainerConfig:
    project_name: str = MISSING
    data_config: DataConfig = MISSING


# @hydra.main(version_base="1.2", config_path=CONFIGS_DIR_PATH, config_name="default")
@hydra.main(version_base="1.2", config_path=TRAININGS_DIR_PATH, config_name="simple")
def run(configuration: DictConfig):
    """Entry point: resolve the composed config and print the sampling value.

    NOTE(review): hydra.main documents config_path as a string; passing a
    pathlib.Path may not work on all Hydra versions — str(...) is safer; confirm.
    """
    sampling = OmegaConf.to_container(cfg=configuration, resolve=True)["data_config"]["sampling"]
    print(f"{sampling} Type: {type(sampling)}")


def register_schemas():
    """Register the structured-config schema so YAML configs can extend it."""
    config_store = ConfigStore.instance()
    config_store.store(name="base_schema", node=TrainerConfig)


if __name__ == "__main__":
    register_schemas()
    run()
configs/default.yaml:
defaults:
- /trainings#: simple
- _self_
project_name: test
configs/trainings/simple.yaml:
defaults:
- base_schema
- data_config: default
- _self_
project_name: test
configs/trainings/schema.yaml:
defaults:
- data_config: default
- _self_
project_name: test
configs/trainings/data_config/default.yaml:
defaults:
- _self_
sampling: DOWNSAMPLING
Now, when I run app.py as shown above, I get the expected result (namely, "DOWNSAMPLING" gets resolved to an enum type). However, when I try to run the application where it constructs the configuration from the default.yaml in the parent directory then I get this error:
So, when the code is like so:
...
@hydra.main(version_base="1.2", config_path=CONFIGS_DIR_PATH, config_name="default")
# @hydra.main(version_base="1.2", config_path=TRAININGS_DIR_PATH, config_name="simple")
def run(configuration: DictConfig):
...
I get the error below:
In 'trainings/simple': Could not load 'trainings/base_schema'.
Config search path:
provider=hydra, path=pkg://hydra.conf
provider=main, path=file:///data/code/demos/hydra/configs
provider=schema, path=structured://
Set the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace.
I do not understand why specifying the schema to be used is causing this issue. Would someone have an idea why and what could be done to fix the problem?
If you are using defaults lists in more than one config file, I strongly suggest that you fully read and understand The Defaults List page.
Configs addressed in the defaults list are relative to the config group of the containing config.
The error is telling you that Hydra is looking for base_schema in trainings, because the defaults list that loads base_schema is in trainings.
Either put base_schema inside trainings when you register it:
config_store.store(group="trainings", name="base_schema", node=TrainerConfig)
Or use absolute addressing in the defaults list when addressing it (e.g. in configs/trainings/simple.yaml):
defaults:
- /base_schema
- data_config: default
- _self_
I have created the azure automation account using terraform code. I have multiple runbooks PowerShell scripts saved in my local. I am using for.each option to import all the runbooks at a time. But I am getting some errors while running the terraform file. Please find my code below:
# Question's (buggy) code: create one Automation runbook per local .ps1 file.
resource "azurerm_automation_runbook" "example" {
for_each = fileset(".", "./Azure_Runbooks/*.ps1")
# BUG: this wraps the split() result list inside another, one-element list,
# so the outer [1] is out of range — hence the "Invalid index" error below.
# The intended expression is: split("/", each.value)[1]
name = ["${split("/", each.value)}"][1]
location = var.location
resource_group_name = var.resource_group
automation_account_name = azurerm_automation_account.example.name
log_verbose = var.log_verbose
log_progress = var.log_progress
runbook_type = var.runbooktype
# NOTE(review): filemd5() returns the file's MD5 hash, not its contents;
# to upload the script body this should presumably be file(each.value) — confirm.
content = filemd5("${each.value}")
}
Error:
Error: Invalid index
│
│ on AutomationAccount\main.tf line 51, in resource "azurerm_automation_runbook" "example":
│ 51: name = ["${split("/", each.value)}"][1]
│ ├────────────────
│ │ each.value will be known only after apply
│
│ The given key does not identify an element in this collection value: the given index is greater than
│ or equal to the length of the collection.
Can someone please help with how I can upload all my existing runbook scripts to the newly created automation account using Terraform code?
You don't need a list inside a list. So instead of
name = ["${split("/", each.value)}"][1]
it should be
name = split("/", each.value)[1]
This is similar to this question, but I believe we are encountering different issues.
Setup:
I have a Kotlin class that interfaces with a TinyMCE instance running in a JavaFX Webview. I am setting up upcalls from Javascript to JavaFX as shown below, in the TinyMCEInterface class:
// Bridge exposed to the WebView's JavaScript as `window.bridgeObj`;
// JavaScript calls these methods to push TinyMCE state into JavaFX.
inner class BridgeObject {
// Stores the TinyMCE editor object and its "selection" sub-object.
fun setEditorAndSelection(ed : JSObject?) {
editorObj = ed
selectionObj = editorObj?.getMember("selection") as JSObject?
}
// Mirrors the editor's HTML content into the JavaFX property.
fun setInterfaceContent(newContent : String) {
contentProp.value = newContent
}
}
// after WebEngine loads successfully
// NOTE(review): WebEngine holds only a weak reference to objects passed to
// setMember(); if `bridgeObj` is just a local, it can be garbage-collected
// and the JS calls will silently do nothing — which matches the "quietly
// fails" symptom described below. Confirm this val lives in a long-lived field.
val bridgeObj : BridgeObject = BridgeObject()
(webEngine.executeScript("window") as JSObject).setMember("bridgeObj", bridgeObj)
I am then calling these methods from Javascript, in TinyMCE setup:
// Once TinyMCE is ready, seed its content and hand the editor to JavaFX.
ed.on('init', () => {
  ed.setContent(initContent);
  window.bridgeObj.setEditorAndSelection(ed);
  window.bridgeObj.setInterfaceContent(ed.getContent());
});

// Keep the JavaFX side in sync with every edit or selection change.
ed.on('Change SelectionChange', () => {
  window.bridgeObj.setInterfaceContent(ed.getContent());
});
Problem:
This works perfectly fine when the TinyMCEInterface class file lies in the root directory of my application (in package com.myself.app). That is, when the file structure looks like this:
com.myself.app/
│
├─ TinyMCEInterface.kt
│
├─ Main.kt
But breaks when I move TinyMCEInterface into a package (in package com.myself.app.somepackage):
com.myself.app/
│
├─ somepackage/
│ ├─ TinyMCEInterface.kt
│
├─ Main.kt
When I say "breaks", there are no errors; the calls to member functions of window.bridgeObj simply do not happen and quietly fail. I am completely bewildered as to how this can be happening.
Thanks in advance for any advice!
I've tried using google_download("URL", "local_path") from the "GoogleDrive" library, but it seems to only get the first sheet in CSV format.
Does anyone have a clue?
It looks like the guts of GoogleDrive.jl is just doing some url manipulation.
https://github.com/tejasvaidhyadev/GoogleDrive.jl/blob/master/src/GoogleDrive.jl#L26
# (Quoted from the GoogleDrive.jl source linked above.)
# Predicates: classify a URL as a Google Sheets or Google Drive link.
isg_sheet(url) = occursin("docs.google.com/spreadsheets", url)
isg_drive(url) = occursin("drive.google.com", url)
# Rewrite a spreadsheet URL into a direct export link for `format`.
function sheet_handler(url; format=:csv)
# splitdir() treats the URL like a filesystem path: `expo` is the last segment.
link, expo = splitdir(url)
if startswith(expo, "edit") || expo == ""
# ".../edit..." (or a bare document URL): append an export query string.
url = link * "/export?format=$format"
elseif startswith(expo, "export")
# Already an export link: swap the requested format in place.
url = replace(url, r"format=([a-zA-Z]*)(.*)"=>SubstitutionString("format=$format\\2"))
end
url
end
# Resolve shortened links, then download via Drive or plain HTTP.
function google_download(url, localdir)
long_url = unshortlink(url)
if isg_sheet(long_url)
long_url = sheet_handler(long_url)
end
if isg_drive(long_url)
drive_download(long_url, localdir)
else
DataDeps.fetch_http(long_url, localdir)
end
end
Google Sheets API
If you want to do much more than that you need to actually use the Google Sheet's API.
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/get
GET https://sheets.googleapis.com/v4/spreadsheets/{spreadsheetId}
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet
With the GET on a spreadsheet id (found in the URL for the spreadsheet) the response contains the ids for all the sheets in the spreadsheet.
{
"spreadsheetId": string,
"properties": {
object (SpreadsheetProperties)
},
"sheets": [
{
object (Sheet)
}
],
"namedRanges": [
{
object (NamedRange)
}
],
"spreadsheetUrl": string,
"developerMetadata": [
{
object (DeveloperMetadata)
}
]
}
Then you can pull out those sheet id's and do something with them, such as do an export request on each with a format of csv.
Manually scape the gid for each sheet
If you don't want to use the API, and just copy those ids out of the browser URL as you click on each, you may be able to turn them into download links.
To get them to go thru the export link properly you need to pass the gid into the export url like so:
(This is a slightly modified version of GoogleDrive.sheet_handler that takes in the sheet gid)
# Build a per-sheet export URL for a Google Sheets document.
# `format` selects the export format (e.g. :csv, :xlsx) and `sheet_gid`
# picks the individual sheet (the `gid=` value visible in the browser URL).
function sheet_handler(url; format=:csv, sheet_gid=0)
    base, last_segment = splitdir(url)
    query = "format=$format&gid=$sheet_gid"
    if last_segment == "" || startswith(last_segment, "edit")
        # ".../edit..." (or a bare document URL): append a fresh export query.
        return base * "/export?" * query
    elseif startswith(last_segment, "export")
        # Already an export link: rewrite its format and inject the gid.
        return replace(url, r"format=([a-zA-Z]*)(.*)"=>SubstitutionString("format=$format&gid=$sheet_gid\\2"))
    end
    # Anything else passes through unchanged.
    return url
end
So for my example test sheet I had three sheets with the following gids
Sheet1, gid=0
Sheet2, gid=972467363
Sheet3, gid=1251741166
So to grab the third one, I did this:
DataDeps.fetch_http(sheet_handler(url; format=:csv, sheet_gid=1251741166), ".")
Here is the example run:
julia> using GoogleDrive
julia> using GoogleDrive.DataDeps
julia> url = read("link.txt", String)
"https://docs.google.com/spreadsheets/d/13-LtgMi8evaxGxUTwlZZ_lqmr8Epcqt1ZSPUszqWhW4/edit?usp=sharing"
julia> DataDeps.fetch_http(sheet_handler(url; format=:csv, sheet_gid=1251741166), ".")
┌ Info: Downloading
│ source = "https://docs.google.com/spreadsheets/d/13-LtgMi8evaxGxUTwlZZ_lqmr8Epcqt1ZSPUszqWhW4/export?format=csv&gid=1251741166"
│ dest = "./export?format=csv&gid=1251741166"
│ progress = NaN
│ time_taken = "0.0 s"
│ time_remaining = "NaN s"
│ average_speed = "∞ B/s"
│ downloaded = "404 bytes"
│ remaining = "∞ B"
└ total = "∞ B"
┌ Info: Downloading
│ source = "https://docs.google.com/spreadsheets/d/13-LtgMi8evaxGxUTwlZZ_lqmr8Epcqt1ZSPUszqWhW4/export?format=csv&gid=1251741166"
│ dest = "./download-test-julia-Sheet3.csv"
│ progress = NaN
│ time_taken = "0.0 s"
│ time_remaining = "NaN s"
│ average_speed = "7.324 KiB/s"
│ downloaded = "15 bytes"
│ remaining = "∞ B"
└ total = "∞ B"
"./download-test-julia-Sheet3.csv"
shell> cat download-test-julia-Sheet3.csv
Data on Sheet 3
Download as a format that has all the sheets
If you use the sheet_handler call and pass in a format that supports multiple sheets, then you can just parse and manipulate the output locally. Such as with xlsx. I haven't tried it just now, but the call would be something like:
DataDeps.fetch_http(sheet_handler(url; format=:xlsx), ".")
Then find your favorite Julia Excel library and you are off to the races.
I wrote simple task in Grunt. Now, I would like to export this task to another file, and I have a problem with it.
How can I make Grunt find the file with my task? The task just searches for a string on a website and puts it in a file. I'm trying to load it with: grunt.loadTasks('grunt-find');
I have a folder (grunt-find) with find.js inside. But it doesn't work... Do I have to put find.js somewhere else?
Thanks in advance.
grunt.loadTasks() will load every JS file in the directory provided as an argument; so, basically, you have to do something like:
grunt.loadTasks("tasks");
and you may have a directory called "tasks":
project root
|- tasks
|--- file.js
|- Gruntfile.js
If you find the "directory only" behavior of grunt.loadTasks() annoying (i.e. want to keep external task definitions next to external task configuration), you can try something like this:
// Gruntfile that gathers per-module config and task files scattered under
// ./grunt-config, deep-merges the configs into one Grunt configuration,
// and registers the tasks they define.
module.exports = function(grunt) {
var env = process.env.NODE_ENV || 'dev';
var _ = require('lodash');
/*** External config & tasks filepaths ***/
//we have 1 base config, and possibly many module-specific config
var configLocations = ['./grunt-config/default_config.js', './grunt-config/**/config.js'];
//we have 1 base tasks definition, and possibly many module-specific config
var tasksLocations = ['./grunt-config/default_tasks.js', './grunt-config/**/tasks.js'];
/* Typical project layout (matching with the globbing pattern above - adapt to your project structure) :
├── Gruntfile.js
├── package.json
├── grunt-config
│ ├── homepage
│ │ └── config.js
│ ├── navigation
│ │ └── config.js
│ ├── module1
│ │ ├── config.js
│ │ └── tasks.js
│ ├── default_config.js
│ ├── default_tasks.js
│ └── template_module_grunt.txt
├── website_directory1
│ ├── mdp
│ ├── multimedia-storage
│ ├── mv-commit.sh
│ ├── query
│ ├── temp
│ └── tmp
└── website_directory2
├── crossdomain.xml
├── css
├── favicon.ico
├── fonts
:
:
:
*/
/***************** External configuration management ***********************************/
// Expand the globbing patterns to the concrete config files present on disk.
var configFiles = grunt.file.expand({
filter: "isFile"
}, configLocations );
grunt.log.writeln('Gathering external configuration files'.underline.green);
grunt.log.writeln("configFiles : " + grunt.log.wordlist(configFiles, {
separator: ', ',
color: 'cyan'
}));
// Each config file exports a factory: require(file)(grunt, env) -> config fragment.
var configArray = configFiles.map(function(file) {
grunt.log.writeln("=> importing : " + file);
return require(file)(grunt, env);
});
// Deep-merge all fragments (later files win on key conflicts) into one object.
var config = {};
configArray.forEach(function(element) {
config = _.merge(config, element);
});
grunt.initConfig(config);
/***************** Task loading & registering *******************************************/
// We load grunt tasks listed in package.json file
require('load-grunt-tasks')(grunt);
/****** External tasks registering ****************/
grunt.log.writeln('Gathering external task files'.underline.green);
var taskFiles = grunt.file.expand({
filter: "isFile"
}, tasksLocations);
grunt.log.writeln("task files : " + grunt.log.wordlist(taskFiles, {
separator: ', ',
color: 'cyan'
}));
// Each task file exports a function that registers its tasks on `grunt`.
taskFiles.forEach(function(path) {
grunt.log.writeln("=> loading & registering : " + path);
require(path)(grunt);
});
grunt.registerTask('default', ['jshint:gruntfile', 'logConfig']);
// Help/self-check task; the log messages are user-facing runtime strings
// (in French) and are deliberately left untouched.
grunt.registerTask('checkGruntFile', 'Default task - check the gruntfile', function() {
grunt.log.subhead('* Tâche par défaut - aide et vérification du gruntfile *');
grunt.log.writeln('Exécutez "grunt -h" pour avoir plus d\'informations sur les tâches disponibles');
grunt.log.writeln('...');
grunt.log.subhead('Vérification du gruntfile...');
grunt.task.run(['jshint:gruntfile']);
});
//write the generated configuration (for debug)
grunt.registerTask('logConfig', 'Write the generated conf', function() {
//grunt.task.run(['attention:gruntfile']);
grunt.log.subhead('* Configuration générée : *');
grunt.log.writeln(JSON.stringify(config, undefined, 2));
});
};
Source : https://gist.github.com/0gust1/7683132