OMNeT++ - How to prioritize UDP over TCP

Hello, I'm new to OMNeT++ and network simulations. I have the following network:
import inet.common.misc.NetAnimTrace;
import inet.networklayer.configurator.ipv4.IPv4NetworkConfigurator;
import inet.node.ethernet.Eth100M;
import inet.node.inet.Router;
import inet.node.inet.StandardHost;
import inet.node.inet.WirelessHost;
import inet.node.wireless.AccessPoint;
import inet.physicallayer.ieee80211.packetlevel.Ieee80211ScalarRadioMedium;
import inet.visualizer.contract.IIntegratedVisualizer;
import ned.DatarateChannel;

network clienteServer
{
    @display("bgb=500,300");
    submodules:
        visualizer: <default("IntegratedCanvasVisualizer")> like IIntegratedVisualizer if hasVisualizer() {
            parameters:
                @display("p=100,50");
        }
        configurator: IPv4NetworkConfigurator {
            parameters:
                assignDisjunctSubnetAddresses = false;
                @display("p=100,150");
        }
        radioMedium: Ieee80211ScalarRadioMedium {
            parameters:
                @display("p=100,250");
        }
        TCP1: WirelessHost {
            parameters:
                @display("p=186,178");
        }
        UDP1: WirelessHost {
            parameters:
                @display("p=193,77");
        }
        Server: WirelessHost {
            parameters:
                @display("p=438,122");
        }
        accessPoint: AccessPoint {
            parameters:
                @display("p=315,108");
        }
        UDP2: WirelessHost {
            parameters:
                @display("p=255,30");
        }
}
and my .ini file is the following; it is very simple:
[General]
network = clienteServer
total-stack = 7MiB
tkenv-plugin-path = ../../../etc/plugins
debug-on-errors = true
record-eventlog = true
**.addDefaultRoutes = false
**.UDP1.numUdpApps = 10
**.UDP1.udpApp[*].typename = "UDPVideoStreamCli"
**.UDP1.udpApp[*].serverAddress = "Server"
**.UDP1.udpApp[*].serverPort = 1000
**.UDP2.numUdpApps = 1
**.UDP2.udpApp[*].typename = "UDPBasicApp"
**.UDP2.udpApp[*].destAddresses = "Server"
**.UDP2.udpApp[*].messageLength = 1000B
**.UDP2.udpApp[*].sendInterval = 12ms
**.Server.numUdpApps = 1
**.Server.udpApp[*].typename = "UDPVideoStreamSvr"
**.Server.udpApp[*].localPort = 1000
**.Server.udpApp[*].sendInterval = 10ms
**.Server.udpApp[*].packetLen = 2000B
**.Server.udpApp[*].videoSize = 100000B
**.TCP1.numTcpApps = 1
**.TCP1.tcpApp[*].typename = "TelnetApp"
**.TCP1.tcpApp[0].localAddress = ""
**.TCP1.tcpApp[0].localPort = -1
**.TCP1.tcpApp[0].connectAddress = "Server"
**.TCP1.tcpApp[0].connectPort = 1000
**.TCP1.tcpApp[0].startTime = 0
**.TCP1.tcpApp[0].commandLength = exponential(10B)
**.TCP1.tcpApp[0].commandOutputLength = exponential(5B)
**.TCP1.tcpApp[0].thinkTime = truncnormal(1s,2s)
**.TCP1.tcpApp[0].idleInterval = 10ms
**.TCP1.tcpApp[0].reconnectInterval = 3s
**.TCP1.tcpApp[0].dataTransferMode = "object"
**.Server.numTcpApps = 1
**.Server.tcpApp[*].typename = "TCPGenericSrvApp"
**.Server.tcpApp[0].localAddress = ""
**.Server.tcpApp[0].localPort = 1000
**.Server.tcpApp[0].replyDelay = 0
**.initialZ = 0
**.scalar-recording = true
**.vector-recording = true
As you can see, I only have four hosts: one that acts as a server, two UDP ones, and one that sends TCP messages. How can I prioritize UDP traffic over TCP in a simple way? Any suggestions are appreciated.


Terraform that creates a single Aurora cluster and schedules the cluster to run during business hours

Sorry, I am a beginner at Terraform, and I found some useful modules.
I need to make a single-instance Aurora cluster for non-production, and I need to shut it down after business hours.
How can I create an Aurora cluster and schedule it to run during business hours?
I can't get the scheduler to connect to the RDS cluster.
provider "aws" {
region = local.region
}
locals {
name = "example-aurora"
region = "us-east-1"
tags = {
Owner = "user"
Environment = "dev"
}
}
################################################################################
# Supporting Resources
################################################################################
resource "random_password" "master" {
length = 10
}
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "~> 3.0"
name = "aurora_vpc"
cidr = "10.99.0.0/18"
enable_dns_support = true
enable_dns_hostnames = true
azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
public_subnets = ["10.99.0.0/24", "10.99.1.0/24", "10.99.2.0/24"]
private_subnets = ["10.99.3.0/24", "10.99.4.0/24", "10.99.5.0/24"]
database_subnets = ["10.99.7.0/24", "10.99.8.0/24", "10.99.9.0/24"]
tags = local.tags
}
resource "aws_db_parameter_group" "muffy-pg" {
family = "postgres13"
name = "peter-rds-param-group"
parameter {
apply_method = "immediate"
name = "autovacuum_naptime"
value = "30"
}
parameter {
apply_method = "pending-reboot"
name = "autovacuum_max_workers"
value = "15"
}
}
resource "aws_docdb_cluster_parameter_group" "muffy-cluster-pg" {
name = "peter-rds-param-group"
family = "postgres13"
}
module "cluster" {
source = "terraform-aws-modules/rds-aurora/aws"
name = "test-aurora-db-postgres96"
engine = "aurora-postgresql"
engine_version = "13.7"
instance_class = "db.t3.small"
instances = {
one = {}
two = {}
}
vpc_id = module.vpc.vpc_id
subnets = [module.vpc.database_subnets[0], module.vpc.database_subnets[1], module.vpc.database_subnets[2]]
# allowed_security_groups = ["sg-12345678"]
allowed_cidr_blocks = ["10.99.0.0/18"]
storage_encrypted = true
apply_immediately = true
monitoring_interval = 10
db_parameter_group_name = aws_db_parameter_group.muffy-pg.name
db_cluster_parameter_group_name = aws_docdb_cluster_parameter_group.muffy-cluster-pg.name
enabled_cloudwatch_logs_exports = ["postgresql"]
tags = {
Environment = "dev"
Terraform = "true"
}
}
variable "environment" {
default = "dev"
}
module "rds_schedule" {
depends_on = [module.cluster]
source = "github.com/barryw/terraform-aws-rds-scheduler"
# version = "~> 2.0.0"
/* Don't stop RDS in production! */
skip_execution = var.environment == "prod"
identifier = "peter-scheduler"
/* Start the RDS cluster at 6:50am EDT Monday - Friday */
up_schedule = "cron(50 10 ? * MON-FRI *)"
/* Stop the RDS cluster at 9pm EDT every night */
down_schedule = "cron(0 1 * * ? *)"
rds_identifier = module.cluster.identifier
is_cluster = true
}
I guess the issue is with the rds_identifier value used under module "rds_schedule":
rds_identifier = module.cluster.identifier
It should be:
rds_identifier = module.cluster.cluster_id
The source module used here for the Aurora cluster, "terraform-aws-modules/rds-aurora/aws", outputs the cluster identifier as cluster_id, not identifier.
Reference:
https://registry.terraform.io/modules/terraform-aws-modules/rds-aurora/aws/latest#outputs
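With that change applied, the scheduler block reads:
module "rds_schedule" {
  depends_on = [module.cluster]
  source     = "github.com/barryw/terraform-aws-rds-scheduler"

  skip_execution = var.environment == "prod"
  identifier     = "peter-scheduler"
  up_schedule    = "cron(50 10 ? * MON-FRI *)"
  down_schedule  = "cron(0 1 * * ? *)"

  # cluster_id is the output name exposed by terraform-aws-modules/rds-aurora/aws
  rds_identifier = module.cluster.cluster_id
  is_cluster     = true
}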

How to generate a map from two disjoint data structures in Terraform?

I have a list and a map like so:
locals {
  traffic_rules = [
    {
      name         = "eu-pool"
      geo_mappings = ["GEO-EU"]
      failover     = [local.endpoint_pools.a, local.endpoint_pools.b, local.endpoint_pools.c]
    },
    {
      name         = "world-pool"
      geo_mappings = ["WORLD"]
      failover     = [local.endpoint_pools.a]
    },
  ]

  endpoint_pools = {
    a = [
      {
        fqdn   = "endpoint_pool.a.com."
        weight = 1
      }
    ]
    b = [
      {
        fqdn   = "endpoint_pool.b1.com."
        weight = 1
      },
      {
        fqdn   = "endpoint_pool.b2.com."
        weight = 1
      }
    ]
    c = [
      {
        fqdn   = "endpoint_pool.c.com."
        weight = 1
      }
    ]
  }
}
Basically, I want to iterate through traffic_rules and gather:
the index of failover[item] in traffic_rules when I iterate through it
the name of the traffic_rule, eg "eu-pool"
the string "a/b/c" from local.endpoint_pools.a/b/c (these are also keys in endpoint_pools map)
In the end, I need to be able to generate a flattened map like below:
{
  "eu-pool-a" = {
    endpoint_pool_name  = "a"
    priority            = 1
    top_profile_name    = "eu-pool"
    bottom_profile_name = "bottom-profile-a"
  }
  "eu-pool-b" = {
    endpoint_pool_name  = "b"
    priority            = 2
    top_profile_name    = "eu-pool"
    bottom_profile_name = "bottom-profile-b"
  }
  "eu-pool-c" = {
    endpoint_pool_name  = "c"
    priority            = 3
    top_profile_name    = "eu-pool"
    bottom_profile_name = "bottom-profile-c"
  }
  "world-pool-a" = {
    endpoint_pool_name  = "a"
    priority            = 1
    top_profile_name    = "world-pool"
    bottom_profile_name = "bottom-profile-a"
  }
}
I started off trying to flatten traffic_rules, then realized that TF substitutes local.endpoint_pools.a/b/c with their actual contents, so I completely lose the context/string of a/b/c.
As @MartinAtkins recommended, you should really concentrate on simplifying your input data rather than trying to hack up some complex TF expression.
Also, as far as I understand it, your output data structure does not agree with your input data: you have two endpoints for endpoint_pools["b"], yet they are not accounted for in your output data. I assume that you want to use both of them, or only the first one?
Anyway, you can first create a helper structure in locals which maps endpoint to pool name:
locals {
  fqdn_to_pool_name = merge([
    for pool_name, v in local.endpoint_pools :
    {
      for v1 in v :
      v1.fqdn => { pool_name = pool_name }
    }
  ]...)
}
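For reference, with the endpoint_pools above, this helper evaluates to:
{
  "endpoint_pool.a.com."  = { pool_name = "a" }
  "endpoint_pool.b1.com." = { pool_name = "b" }
  "endpoint_pool.b2.com." = { pool_name = "b" }
  "endpoint_pool.c.com."  = { pool_name = "c" }
}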
Then you can generate your structure as follows (I assume you account for both endpoints):
output "test" {
value = merge(flatten([for rule in local.traffic_rules:
[for idx, failovers in rule.failover:
{ for idx2, failover in failovers:
"${rule.name}-${local.fqdn_to_pool_name[failover.fqdn].pool_name}" => {
endpoint_pool_name = local.fqdn_to_pool_name[failover.fqdn].pool_name
priority = idx + 1
top_profile_name = rule.name
bottom_profile_name = "bottom-profile-${local.fqdn_to_pool_name[failover.fqdn].pool_name}"
}...
}
]
])...)
}
which gives:
test = {
  "eu-pool-a" = [
    {
      "bottom_profile_name" = "bottom-profile-a"
      "endpoint_pool_name"  = "a"
      "priority"            = 1
      "top_profile_name"    = "eu-pool"
    },
  ]
  "eu-pool-b" = [
    {
      "bottom_profile_name" = "bottom-profile-b"
      "endpoint_pool_name"  = "b"
      "priority"            = 2
      "top_profile_name"    = "eu-pool"
    },
    {
      "bottom_profile_name" = "bottom-profile-b"
      "endpoint_pool_name"  = "b"
      "priority"            = 2
      "top_profile_name"    = "eu-pool"
    },
  ]
  "eu-pool-c" = [
    {
      "bottom_profile_name" = "bottom-profile-c"
      "endpoint_pool_name"  = "c"
      "priority"            = 3
      "top_profile_name"    = "eu-pool"
    },
  ]
  "world-pool-a" = [
    {
      "bottom_profile_name" = "bottom-profile-a"
      "endpoint_pool_name"  = "a"
      "priority"            = 1
      "top_profile_name"    = "world-pool"
    },
  ]
}
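If you want plain objects instead of one-element lists (as in your expected output), you can collapse the groups afterwards. A minimal sketch, assuming Terraform >= 0.15 for one() and moving the expression above into a local named grouped (a name I made up):
locals {
  # same expression as in the "test" output above
  grouped = merge(flatten([
    for rule in local.traffic_rules : [
      for idx, failovers in rule.failover : {
        for failover in failovers :
        "${rule.name}-${local.fqdn_to_pool_name[failover.fqdn].pool_name}" => {
          endpoint_pool_name  = local.fqdn_to_pool_name[failover.fqdn].pool_name
          priority            = idx + 1
          top_profile_name    = rule.name
          bottom_profile_name = "bottom-profile-${local.fqdn_to_pool_name[failover.fqdn].pool_name}"
        }...
      }
    ]
  ])...)
}

output "test" {
  # every grouped list holds identical objects, so deduplicate and unwrap the single element
  value = { for k, v in local.grouped : k => one(distinct(v)) }
}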

Can't fix error "RuntimeError: You need to use the gevent-websocket server." and "OSError: write error"

I am writing a website with Flask, using a stack of uWSGI + NGINX + Flask-SocketIO, with gevent as the asynchronous module. The following errors occur during operation:
RuntimeError: You need to use the gevent-websocket server.
uwsgi_response_writev_headers_and_body_do(): Broken pipe [core/writer.c line 306] during >
Feb 23 12:57:55 toaa uwsgi[558436]: OSError: write error
I tried different configurations and also removed the async_mode='gevent' from the socketio initialization.
wsgi.py file:
from webapp import app, socketio

if __name__ == '__main__':
    socketio.run(app, use_reloader=False, debug=True, log_output=True)
project.ini:
[uwsgi]
module = wsgi:app
master = true
gevent = 1024
gevent-monkey-patch = true
# optional
buffer-size = 32768
socket = /home/sammy/projectnew/projectnew.sock
socket-timeout = 240
chmod-socket = 664
vacuum = true
die-on-term = true
webapp/__init__.py for application app:
from gevent import monkey
monkey.patch_all()
import grpc.experimental.gevent
grpc.experimental.gevent.init_gevent()
from flask import Flask, session, request
from config import DevelopConfig, MqttConfig, MailConfig, ProductionConfig
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_mail import Mail
from flask_script import Manager
from flask_socketio import SocketIO
# from flask_mqtt import Mqtt
from flask_login import LoginManager
from flask_babel import Babel
from flask_babel_js import BabelJS
from flask_babel import lazy_gettext as _l
from apscheduler.schedulers.gevent import GeventScheduler
# from celery import Celery
app = Flask(__name__)
app.config.from_object(ProductionConfig)
app.config.from_object(MqttConfig)
app.config.from_object(MailConfig)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
mail = Mail(app)
manager = Manager(app, db)
login_manager = LoginManager(app)
login_manager.login_view = 'auth'
login_manager.login_message = _l("Необходимо авторизоваться для доступа к закрытой странице")  # "You must log in to access this page"
login_manager.login_message_category = "error"
# celery = Celery(app.name, broker=Config.CELERY_BROKER_URL)
# celery.conf.update(app.config)
scheduler = GeventScheduler()
# socketio = SocketIO(app) - Production Version
socketio = SocketIO(app, async_mode='gevent')
babel = Babel(app)
babeljs = BabelJS(app=app, view_path='/translations/')
import webapp.views
@babel.localeselector
def get_locale():
    # if the user has set up the language manually it will be stored in the session,
    # so we use the locale from the user settings
    try:
        language = session['language']
    except KeyError:
        language = None
    if language is not None:
        print(language)
        return language
    return request.accept_languages.best_match(app.config['LANGUAGES'].keys())

from webapp import models

if __name__ == "__main__":
    manager.run()
The class in which the socket itself is used (mqtt.py):
from webapp import socketio, app
from flask import request
from flask_mqtt import Mqtt
from flask_babel import lazy_gettext as _l
from webapp.tasks import SchedulerTask
from webapp import translate_state_gate as tr_msg
import json
import copy
import logging

mqtt = Mqtt(app)
logger = logging.getLogger('flask.flask_mqtt')
logger.disabled = True

class MqttTOAA(object):
    type_topic = ["/Control", "/Data"]
    m_request_state = {"comm": "3"}
    m_start = {"Gate": "Start"}
    m_stop = {"Gate": "Stop"}
    qos_request = 1
    qos_sub = 2
    struct_state_devices = None
    POOL_TIME = 2
    end_publish = None
    devices = None
    schedulers_list = list()
    sch_task = None
    sid_mqtt = None
    code_list = list()

    def __init__(self, devices, lang):
        mqtt._connect()
        self.devices = devices
        self.sch_task = SchedulerTask()
        if lang not in app.config['LANGUAGES'].keys():
            lang = 'ru'
        self.dict_gate = {
            "dict_state_button": {'con_Clos': tr_msg.MessageGate.t_message[lang]["f_open"],
                                  'con_Open': tr_msg.MessageGate.t_message[lang]["f_close"],
                                  "fl_OpenClos": (tr_msg.MessageGate.t_message[lang]["f_continue"],
                                                  tr_msg.MessageGate.t_message[lang]["f_stop"],
                                                  tr_msg.MessageGate.t_message[lang]["f_abort"])},
            "dict_state_text": {tr_msg.MessageGate.t_message[lang]["f_open"]:
                                    tr_msg.MessageGate.t_message[lang]["ps_close"],
                                tr_msg.MessageGate.t_message[lang]["f_close"]:
                                    tr_msg.MessageGate.t_message[lang]["ps_open"],
                                tr_msg.MessageGate.t_message[lang]["f_continue"]:
                                    tr_msg.MessageGate.t_message[lang]["ps_stop"],
                                tr_msg.MessageGate.t_message[lang]["f_abort"]:
                                    tr_msg.MessageGate.t_message[lang]["pr_close"],
                                tr_msg.MessageGate.t_message[lang]["f_stop"]:
                                    (tr_msg.MessageGate.t_message[lang]["pr_open"],
                                     tr_msg.MessageGate.t_message[lang]["pr_close"],
                                     tr_msg.MessageGate.t_message[lang]["pr_move"])},
            "dict_type_element": {"button": u'', "text": u'', "device_code": u''},
            "state_gate": {},
            "position": {"state": u'', "stop": False},
            "reverse": False,
        }
        self.close_msg = tr_msg.MessageGate.t_message[lang]["pr_close"]
        self.open_msg = tr_msg.MessageGate.t_message[lang]["pr_open"]
        self.create_devices_dict()
        self.handle_mqtt_connect()
        self.mqtt_onmessage = mqtt.on_message()(self._handle_mqtt_message)
        self.mqtt_onlog = mqtt.on_log()(self._handle_logging)
        self.socketio_error = socketio.on_error()(self._handle_error)
        self.handle_change_state = socketio.on('change_state')(self._handle_change_state)
        self.handle_on_connect = socketio.on('connect')(self._handle_on_connect)
        self.handle_unsubscribe_all = socketio.on('unsubscribe_all')(self._handle_unsubscribe_all)

    def _handle_on_connect(self):
        self.sid_mqtt = request.sid

    def handle_mqtt_connect(self):
        task = None
        for dev in self.devices:
            if dev.device_code not in self.code_list:
                mqtt.subscribe("BK" + dev.device_code + self.type_topic[1], self.qos_sub)
                self.code_list.append(dev.device_code)
                task = self.sch_task.add_scheduler_publish(dev.device_code,
                                                           mqtt,
                                                           "BK" + dev.device_code +
                                                           self.type_topic[0],
                                                           self.m_request_state,
                                                           self.qos_request,
                                                           self.POOL_TIME)
                if task is not None:
                    self.schedulers_list.append(task)
        if len(self.schedulers_list) > 0:
            self.sch_task.start_schedulers()
        self.code_list.clear()

    @staticmethod
    def _handle_error():
        print(request.event["message"])  # "my error event"
        print(request.event["args"])     # (data,)

    @staticmethod
    def _handle_unsubscribe_all():
        mqtt.unsubscribe_all()

    def _handle_change_state(self, code):
        print(code)
        # print(self.struct_state_devices[code])
        message = None
        if code is not None:
            try:
                type_g = self.struct_state_devices[code]["state_gate"]
                if type_g["fl_OpenClos"] == 1:
                    message = self.m_stop
                else:
                    if self.struct_state_devices[code]["reverse"] is True:
                        if self.struct_state_devices[code]["position"]["state"] == self.close_msg:
                            message = self.m_stop
                            self.struct_state_devices[code]["position"]["state"] = self.open_msg
                        else:
                            message = self.m_start
                    else:
                        message = self.m_start
                print("Msg:" + str(message))
            except Exception as ex:
                print(ex)
        if message is not None:
            mqtt.publish("BK" + code + self.type_topic[0], json.dumps(message), self.qos_request)
        else:
            print("Error change state " + code)

    def _handle_mqtt_message(self, client, userdata, message):
        # print("Get message")
        # print(self.struct_state_devices)
        data = dict(
            topic=message.topic,
            payload=message.payload.decode(),
            qos=message.qos,
        )
        try:
            data = json.loads(data['payload'])
            self.gate_msg(data)
        except Exception as ex:
            print("Exception: " + str(ex))

    @staticmethod
    def _handle_logging(client, userdata, level, buf):
        print(level, buf)

    def create_devices_dict(self):
        if self.struct_state_devices is None:
            self.struct_state_devices = dict()
        for dev in self.devices:
            self.struct_state_devices[dev.device_code] = self.dict_gate.copy()
            if dev.typedev.reverse:
                self.struct_state_devices[dev.device_code]['reverse'] = True

    def gate_msg(self, data):
        k = ""
        code = data["esp_id"][2:]
        dict_dev = copy.deepcopy(self.struct_state_devices[code])
        dict_dev["state_gate"] = data.copy()
        try:
            if dict_dev["state_gate"]["con_Clos"] == 0:  # the gate is closed
                # print("1")
                k = "con_Clos"
                dict_dev["position"]["state"] = k
                dict_dev["position"]["stop"] = False
            elif dict_dev["state_gate"]["con_Open"] == 0:  # the gate is open
                # print("2")
                k = "con_Open"
                dict_dev["position"]["state"] = k
                dict_dev["position"]["stop"] = False
            elif dict_dev["state_gate"]["fl_OpenClos"] == 0:
                # print("3")
                k = "fl_OpenClos"
                # the gate reverses while closing
                if dict_dev["position"]["state"] == self.close_msg and dict_dev["reverse"] is True:
                    # print("4")
                    k1 = 1
                    k2 = 0
                    dict_dev["dict_type_element"]["text"] = \
                        dict_dev["dict_state_text"][dict_dev["dict_state_button"][k][k1]][k2]
                    dict_dev["position"]["stop"] = False
                else:
                    # print("5")
                    k1 = 0
                    dict_dev["dict_type_element"]["text"] = \
                        dict_dev["dict_state_text"][dict_dev["dict_state_button"][k][k1]]
                    dict_dev["position"]["stop"] = True
            elif dict_dev["state_gate"]["fl_OpenClos"] == 1:
                # print("6")
                k = "fl_OpenClos"
                if len(dict_dev["position"]["state"]) == 0:
                    # print("7")
                    k1 = 1
                    k2 = 2
                    dict_dev["dict_type_element"]["text"] = \
                        dict_dev["dict_state_text"][dict_dev["dict_state_button"][k][k1]][k2]
                elif dict_dev["position"]["state"] == "con_Clos" or \
                        dict_dev["position"]["state"] == self.open_msg:
                    if dict_dev["position"]["stop"]:
                        # print("8")
                        k1 = 1
                        k2 = 1
                        dict_dev["position"]["stop"] = False
                        dict_dev["dict_type_element"]["text"] = \
                            dict_dev["dict_state_text"][dict_dev["dict_state_button"][k][k1]][k2]
                    else:
                        # print("9")
                        k1 = 1
                        k2 = 0
                        dict_dev["dict_type_element"]["text"] = \
                            dict_dev["dict_state_text"][dict_dev["dict_state_button"][k][k1]][k2]
                elif dict_dev["position"]["state"] == "con_Open" or \
                        dict_dev["position"]["state"] == self.close_msg:
                    if dict_dev["reverse"]:
                        # print("10")
                        k1 = 2
                        dict_dev["dict_type_element"]["text"] = \
                            dict_dev["dict_state_text"][dict_dev["dict_state_button"][k][k1]]
                    else:
                        if dict_dev["position"]["stop"]:
                            # print("11")
                            k1 = 1
                            k2 = 0
                            dict_dev["position"]["stop"] = False
                            dict_dev["dict_type_element"]["text"] = \
                                dict_dev["dict_state_text"][dict_dev["dict_state_button"][k][k1]][k2]
                        else:
                            # print("12")
                            k1 = 1
                            k2 = 1
                            dict_dev["dict_type_element"]["text"] = \
                                dict_dev["dict_state_text"][dict_dev["dict_state_button"][k][k1]][k2]
                if dict_dev["position"]["state"] != dict_dev["dict_type_element"]["text"]:
                    # print("13")
                    dict_dev["position"]["state"] = dict_dev["dict_type_element"]["text"]
            if k == "fl_OpenClos":
                dict_dev["dict_type_element"]["button"] = dict_dev["dict_state_button"][k][k1]
            else:
                dict_dev["dict_type_element"]["button"] = dict_dev["dict_state_button"][k]
                dict_dev["dict_type_element"]["text"] = \
                    dict_dev["dict_state_text"][dict_dev["dict_state_button"][k]]
        except Exception as ex:
            print("Exception (gate_msg): " + str(ex))
        dict_dev["dict_type_element"]["device_code"] = data["esp_id"][2:]
        dict_dev["dict_type_element"]["temp"] = data["temp_1"]
        dict_dev["dict_type_element"]["button"] = copy.deepcopy(str(dict_dev["dict_type_element"]["button"]))
        dict_dev["dict_type_element"]["text"] = copy.deepcopy(str(dict_dev["dict_type_element"]["text"]))
        self.struct_state_devices[code] = copy.deepcopy(dict_dev)
        socketio.emit('mqtt_message', data=dict_dev["dict_type_element"], room=self.sid_mqtt)
        # print(dict_dev["state_gate"]["esp_id"] + str(dict_dev["dict_type_element"]))
When you use uWSGI, the async_mode should be gevent_uwsgi:
socketio = SocketIO(app, async_mode='gevent_uwsgi')
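For completeness, with that async_mode Flask-SocketIO expects uWSGI to run with its gevent loop and WebSocket support enabled. A minimal sketch of a direct uWSGI invocation, along the lines of the Flask-SocketIO deployment docs (adjust the port and module names to your project; when NGINX proxies in front, the WebSocket upgrade headers must be forwarded as well):
uwsgi --http :5000 --gevent 1000 --http-websockets --master --wsgi-file wsgi.py --callable app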

Why are instances that were already created being deleted and recreated?

I created the resources with Terraform 0.12.6. However, without making any changes to the code, running "terraform apply" wants to destroy the existing EC2 instances and rebuild them. I would like to know why it is doing so and what is incorrect below.
resource "aws_instance" "web_ui" {
count = 2
ami = data.aws_ami.ami.id
instance_type = var.type_m5lg
associate_public_ip_address = false
key_name = var.key_name
security_groups = [var.vpc_security_group_ids, var.sg_devops, var.sg_common]
subnet_id = (data.aws_subnet.subnetid)[count.index].id
root_block_device {
delete_on_termination = true
}
ebs_block_device {
device_name = "/dev/sdb"
volume_size = "200"
volume_type = "gp2"
delete_on_termination = true
}
tags = "${merge(
local.common_tags,
map(
"Name", "${var.name}-${var.prog}-${var.env}${count.index + 1}-${var.ec2_name_web}-use1.xyz.com"
)
)}"
}
Changing
security_groups = [var.vpc_security_group_ids, var.sg_devops, var.sg_common]
to
vpc_security_group_ids = [var.vpc_security_group_ids, var.sg_devops, var.sg_common]
fixed the issue.
Based on the comment from @stack72 in hashicorp/terraform#7853.
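For context: on instances in a VPC, security_groups expects group names and any change to it forces the instance to be replaced, while vpc_security_group_ids takes group IDs and is updated in place. A minimal sketch of the corrected resource (all other arguments unchanged):
resource "aws_instance" "web_ui" {
  count = 2

  # ... same arguments as above ...

  # IDs instead of names: avoids the forced replacement triggered by security_groups in a VPC
  vpc_security_group_ids = [var.vpc_security_group_ids, var.sg_devops, var.sg_common]
}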

Problems with TCP in VACaMobil

I am having some problems with TCP in VACaMobil.
I am using two TCP modules built up in Inet: TCPBasicClientApp and TCPEchoApp.
The former works as a client and the latter works as a server.
The number of cars (the module name is "coche") in the simulation is 100, and there are five concurrent transmissions:
CLIENT SERVER
Car 0 --> Car 99
Car 1 --> Car 98
Car 2 --> Car 97
Car 3 --> Car 96
Car 4 --> Car 95
I am also using three routing protocols: AODV, DYMO and OLSR.
However, regardless of the configuration that I run, errors like this occur in the simulation:
Error in module (TCPBasicClientApp) Highway.coche[*].tcpApp[0] (id = 35) at event #49519, t = 166: IPvXAddressResolver: module 'coche[98]' not found.
In this case, using AODV, car 1 cannot communicate with car 98. I learned that VACaMobil, after a while, deletes some cars while creating others beyond 100,
such as "coche[110]", "coche[115]", etc.
I don't understand why it keeps deleting and creating nodes; I thought that VACaMobil generated a constant number of vehicles.
How can I fix this? Any help is appreciated.
Here is the omnetpp.ini:
[General]
network = Highway
debug-on-errors = false
cmdenv-express-mode = true
cmdenv-autoflush = true
cmdenv-status-frequency = 10000000s
#repeat = 10
tkenv-plugin-path = ../../../etc/plugins
tkenv-image-path = bitmaps
check-signals = true
**.manager.**.scalar-recording = true
**.manager.**.vector-recording = true
**.manetrouting.**.scalar-recording = true
**.movStats.**.scalar-recording = true
**.movStats.**.vector-recording = true
**.mac.**.scalar-recording = true
**.mac.**.vector-recording = true
**.scalar-recording = true
**.vector-recording = true
#ChannelControl
*.channelControl.carrierFrequency = 2.4GHz
*.channelControl.pMax = 2mW
*.channelControl.sat = -110dBm
*.channelControl.alpha = 2
*.channelControl.numChannels = 1
# TraCIScenarioManagerLaunchd
*.manager.updateInterval = 1s
*.manager.host = "localhost"
*.manager.port = 9999
*.manager.moduleType = "rcdp9.TAdhocHost"
*.manager.moduleName = "coche"
*.manager.moduleDisplayString = ""
*.manager.autoShutdown = true
*.manager.margin = 25
*.manager.warmUpSeconds = 0
*.manager.launchConfig = xmldoc("VACaMobil/Milan/downtown.launch.xml")
*.manager.getStatistics = true
*.manager.statFiles = "${resultdir}/${configname}-${runnumber}-"
# nic settings
**.wlan[*].bitrate = 24Mbps
**.wlan[*].opMode = "g"
**.wlan[*].mgmt.frameCapacity = 10
**.wlan[*].mgmtType = "Ieee80211MgmtAdhoc"
**.wlan[*].mac.basicBitrate = 24Mbps
**.wlan[*].mac.controlBitrate = 24Mbps
**.wlan[*].mac.address = "auto"
**.wlan[*].mac.maxQueueSize = 14
**.wlan[*].mac.rtsThresholdBytes = 3000B
**.wlan[*].mac.retryLimit = 7
**.wlan[*].mac.cwMinData = 7
**.wlan[*].radio.transmitterPower = 2mW
**.wlan[*].radio.thermalNoise = -110dBm
**.wlan[*].radio.sensitivity = -85dBm
**.wlan[*].radio.pathLossAlpha = 2
**.wlan[*].radio.snirThreshold = 4dB
**.channelNumber = 0
**.coche.networkLayer.configurator.networkConfiguratorModule = "configurator"
# manet routing
**.routingProtocol = ${"AODVUU", "DYMO", "OLSR"}
**.tcpAlgorithmClass = "TCPNewReno"
**.coche[0..4].numTcpApps = 1
**.coche[0..4].tcpApp[*].typename = "TCPBasicClientApp"
**.coche[0..4].tcpApp[*].localPort = -1
**.coche[0..4].tcpApp[*].connectPort = 1000
**.coche[0..4].tcpApp[*].dataTransferMode = "bytecount"
**.coche[0..4].tcpApp[*].startTime = 10s
**.coche[0..4].tcpApp[*].thinkTime = 1s
**.coche[0..4].tcpApp[*].idleInterval = 3s
**.coche[0..4].tcpApp[*].requestLength = 5000000B
**.coche[5..94].numTcpApps = 0
**.coche[95..99].numTcpApps = 1
**.coche[95..99].tcpApp[*].typename = "TCPEchoApp"
**.coche[95..99].tcpApp[*].localPort = 1000
**.coche[95..99].tcpApp[*].dataTransferMode = "bytecount"
**.coche[0].tcpApp[*].connectAddress = "coche[99]"
**.coche[1].tcpApp[*].connectAddress = "coche[98]"
**.coche[2].tcpApp[*].connectAddress = "coche[97]"
**.coche[3].tcpApp[*].connectAddress = "coche[96]"
**.coche[4].tcpApp[*].connectAddress = "coche[95]"
**.meanNumberOfCars = 100
**.autoShutdown = false
Here is TAdhocHost.ned:
package rcdp9;

import inet.networklayer.IManetRouting;
import inet.networklayer.autorouting.ipv4.HostAutoConfigurator2;
import inet.nodes.inet.AdhocHost;

module TAdhocHost extends AdhocHost
{
    parameters:
        @display("i=device/cellphone");
        mobilityType = default("TraCIMobility");
        IPForward = true;
    submodules:
        ac_wlan: HostAutoConfigurator2 {
            @display("p=127,240");
        }
    connections:
}
Here is Highway.ned:
package rcdp9;

import inet.world.VACaMobil.VACaMobil;
import inet.networklayer.autorouting.ipv4.IPv4NetworkConfigurator;
import inet.networklayer.autorouting.ipv4.HostAutoConfigurator;
import inet.nodes.inet.AdhocHost;
import inet.world.radio.ChannelControl;
import inet.world.traci.TraCIScenarioManagerLaunchd;

network Highway
{
    submodules:
        configurator: IPv4NetworkConfigurator {
            @display("p=396,221");
        }
        channelControl: ChannelControl {
            @display("p=396,310");
        }
        manager: VACaMobil {
            @display("p=322,405");
        }
    connections allowunconnected:
}
From what I understand, VACaMobil is built on Veins.
Veins creates a new network node for every vehicle that starts driving. When the corresponding vehicle stops driving (having arrived at its destination), Veins deletes the network node. It never re-uses the same node index.
Thus, in your example, the first vehicle that starts driving will be coche[0]. The next vehicle that starts driving will be coche[1], independent of whether coche[0] has already arrived or is still driving.
