I have an application which needs to listen on ipv6 for a specific port. I have exposed it in the docker file, and I'm running the docker engine with DOCKER_OPTS="--ipv6", but I can't get docker to properly map the ports over ipv6.
Port shows up when using docker inspect {name} (see below for output - redacted to get rid of superfluous stuff). For some reason I can't get it to route the IPv6 stuff, however (and it looks like the ports are only being exposed as ipv4 from the output below).
How do I go about getting the port (9084) to be accessible via ipv6 connections to the host, in the same way that the other two ports are accessible over ipv4 connections to the host?
[
{
// [...]
"HostConfig": {
// [...]
"NetworkMode": "default",
"PortBindings": {
"8883/tcp": [
{
"HostIp": "0.0.0.0",
"HostPort": "8883"
}
],
"9084/tcp": [
{
"HostIp": "0.0.0.0",
"HostPort": "9084"
}
],
"9094/tcp": [
{
"HostIp": "0.0.0.0",
"HostPort": "9094"
}
]
},
// [...]
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "",
"Cgroup": "",
"Links": null,
// [...]
},
// [...]
"Config": {
"Hostname": "40c7aa89b266",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"8883/tcp": {},
"9084/tcp": {},
"9094/tcp": {}
},
// [...]
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "97923c78571e880f267b56f4d86a7338e2f7a878d20b9961ae208af9c2bc6b64",
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"Ports": {
"8883/tcp": [
{
"HostIp": "0.0.0.0",
"HostPort": "8883"
}
],
"9084/tcp": [
{
"HostIp": "0.0.0.0",
"HostPort": "9084"
}
],
"9094/tcp": [
{
"HostIp": "0.0.0.0",
"HostPort": "9094"
}
]
},
"SandboxKey": "/var/run/docker/netns/97923c78571e",
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "cdddc3cd29e3cb40f68cf696f0d6dffadd08aa5bf256cd12cef180f04795ee83",
"Gateway": "172.17.0.1",
"GlobalIPv6Address": "fe80::242:ac11:4",
"GlobalIPv6PrefixLen": 64,
"IPAddress": "172.17.0.4",
"IPPrefixLen": 16,
"IPv6Gateway": "fe80::1",
"MacAddress": "02:42:ac:11:00:04",
"Networks": {
"bridge": {
"IPAMConfig": null,
"Links": null,
"Aliases": null,
"NetworkID": "3371009b227abe4e8992fbd9a56ea662ee76fd692b6d7080045d5deb055e29e3",
"EndpointID": "cdddc3cd29e3cb40f68cf696f0d6dffadd08aa5bf256cd12cef180f04795ee83",
"Gateway": "172.17.0.1",
"IPAddress": "172.17.0.4",
"IPPrefixLen": 16,
"IPv6Gateway": "fe80::1",
"GlobalIPv6Address": "fe80::242:ac11:4",
"GlobalIPv6PrefixLen": 64,
"MacAddress": "02:42:ac:11:00:04"
}
}
}
}
]
Related
I created a table and seed with dynamodb-local
But I can't get the item from the dynamodb-local table with appsync-simulator
{
"data": null,
"errors": [
{
"message": "Cannot return null for non-nullable field Query.getPerson.",
"locations": [
{
"line": 2,
"column": 3
}
],
"path": [
"getPerson"
]
}
]
}
Am I misconfiguring serverless.ts?
//dynamodb-local
dynamodb: {
stages: [
"dev",
],
start: {
port: 8000,
inMemory: true,
migrate: true,
seed: true,
},
//seed
seed: {
deb: {
sources: [
{
table: "patients",
sources: ["./src/migrations/patients.json"]
}
]
}
}
},
//appsync-simulator
"appsync-simulator": {
location: ".esbuild",
apiKey: "da2-fakeApiId123456",
watch: false,
dynamoDb: {
endpoint: "http://localhost:8000",
},
},
Modules version
"serverless-appsync-plugin": "^1.13.0",
"serverless-appsync-simulator": "^0.20.0",
"serverless-dynamodb-local": "^0.2.40",
"serverless-offline": "^8.8.0",
"serverless": "^3.0.0",
When I query a resolver in my GraphQL API, in which I have added a $util.error($ctx) to return the context object, I get the following result (removed unnecessary values).
{
"data": {
"listXData": null
},
"errors": [
{
"message": {
"arguments": {},
"args": {},
"info": {
"fieldName": "listXData",
"variables": {},
"parentTypeName": "Query",
"selectionSetList": [
"items",
"items/id",
"items/createdAt",
"items/updatedAt",
"nextToken"
],
"selectionSetGraphQL": "{\n items {\n id\n createdAt\n updatedAt\n }\n nextToken\n}"
},
"request": {...},
"identity": {
"sub": "",
"issuer": "",
"username": "013fe9d2-95f7-4885-83ec-b7e2e0a1423f",
"sourceIp": "",
"claims": {
"origin_jti": "",
"sub": "",
"event_id": "",
"token_use": "",
"scope": "",
"auth_time": ,
"iss": "",
"exp": ,
"iat": ,
"jti": "",
"client_id": "",
"username": "013fe9d2-95f7-4885-83ec-b7e2e0a1423f"
},
"defaultAuthStrategy": "ALLOW"
},
"stash": {},
"source": null,
"result": {
"items": [],
"scannedCount": 0,
"nextToken": null
},
"error": null,
"prev": {
"result": {}
}
},
"errorType": null,
"data": null,
"errorInfo": null,
"path": [
"listXData"
],
"locations": [
{
"line": 2,
"column": 3,
"sourceName": "GraphQL request"
}
]
}
]
}
As you can see, the username is an ID, however I would prefer to (also) have the email. Is it possible to get the user email (within the Velocity template)?
Let me know if I need to add more details or if my question is unclear.
The identity context only returns back the Cognito username for the user pool. You will need to setup pipeline functions to perform additional queries to get your user information. Here is one intro to setting them up.
At this point, it seems that it is not possible to do this purely by vtl.
I have implemented it using a lambda function, as follow:
Lambda function (node):
/* Amplify Params - DO NOT EDIT
ENV
REGION
Amplify Params - DO NOT EDIT */
const aws = require('aws-sdk')
const cognitoidentityserviceprovider = new aws.CognitoIdentityServiceProvider({
apiVersion: '2016-04-18',
region: 'eu-west-1'
})
exports.handler = async (context, event, callback) => {
if (!context.identity?.username) {
callback('Not signed in')
}
const params = {
'AccessToken': context.request.headers.authorization
}
const result = await cognitoidentityserviceprovider.getUser(params).promise()
const email = result.UserAttributes.find(attribute => attribute.Name === 'email')
callback(null, JSON.stringify({ email }))
}
CustomResources.json
{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "An auto-generated nested stack.",
"Metadata": {...},
"Parameters": {...},
"Resources": {
"GetEmailLambdaDataSourceRole": {
"Type": "AWS::IAM::Role",
"Properties": {
"RoleName": {
"Fn::If": [
"HasEnvironmentParameter",
{
"Fn::Join": [
"-",
[
"GetEmail17ec",
{
"Ref": "GetAttGraphQLAPIApiId"
},
{
"Ref": "env"
}
]
]
},
{
"Fn::Join": [
"-",
[
"GetEmail17ec",
{
"Ref": "GetAttGraphQLAPIApiId"
}
]
]
}
]
},
"AssumeRolePolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "appsync.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
},
"Policies": [
{
"PolicyName": "InvokeLambdaFunction",
"PolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"lambda:InvokeFunction"
],
"Resource": {
"Fn::If": [
"HasEnvironmentParameter",
{
"Fn::Sub": [
"arn:aws:lambda:${AWS::Region}:${AWS::AccountId}:function:GetEmail-${env}",
{
"env": {
"Ref": "env"
}
}
]
},
{
"Fn::Sub": [
"arn:aws:lambda:${AWS::Region}:${AWS::AccountId}:function:GetEmail",
{}
]
}
]
}
}
]
}
}
]
}
},
"GetEmailLambdaDataSource": {
"Type": "AWS::AppSync::DataSource",
"Properties": {
"ApiId": {
"Ref": "AppSyncApiId"
},
"Name": "GetEmailLambdaDataSource",
"Type": "AWS_LAMBDA",
"ServiceRoleArn": {
"Fn::GetAtt": [
"GetEmailLambdaDataSourceRole",
"Arn"
]
},
"LambdaConfig": {
"LambdaFunctionArn": {
"Fn::If": [
"HasEnvironmentParameter",
{
"Fn::Sub": [
"arn:aws:lambda:${AWS::Region}:${AWS::AccountId}:function:GetEmail-${env}",
{
"env": {
"Ref": "env"
}
}
]
},
{
"Fn::Sub": [
"arn:aws:lambda:${AWS::Region}:${AWS::AccountId}:function:GetEmail",
{}
]
}
]
}
}
},
"DependsOn": "GetEmailLambdaDataSourceRole"
},
"InvokeGetEmailLambdaDataSource": {
"Type": "AWS::AppSync::FunctionConfiguration",
"Properties": {
"ApiId": {
"Ref": "AppSyncApiId"
},
"Name": "InvokeGetEmailLambdaDataSource",
"DataSourceName": "GetEmailLambdaDataSource",
"FunctionVersion": "2018-05-29",
"RequestMappingTemplateS3Location": {
"Fn::Sub": [
"s3://${S3DeploymentBucket}/${S3DeploymentRootKey}/pipelineFunctions/${ResolverFileName}",
{
"S3DeploymentBucket": {
"Ref": "S3DeploymentBucket"
},
"S3DeploymentRootKey": {
"Ref": "S3DeploymentRootKey"
},
"ResolverFileName": {
"Fn::Join": [
".",
[
"InvokeGetEmailLambdaDataSource",
"req",
"vtl"
]
]
}
}
]
},
"ResponseMappingTemplateS3Location": {
"Fn::Sub": [
"s3://${S3DeploymentBucket}/${S3DeploymentRootKey}/pipelineFunctions/${ResolverFileName}",
{
"S3DeploymentBucket": {
"Ref": "S3DeploymentBucket"
},
"S3DeploymentRootKey": {
"Ref": "S3DeploymentRootKey"
},
"ResolverFileName": {
"Fn::Join": [
".",
[
"InvokeGetEmailLambdaDataSource",
"res",
"vtl"
]
]
}
}
]
}
},
"DependsOn": "GetEmailLambdaDataSource"
},
"IsOrganizationMember": {
"Type": "AWS::AppSync::FunctionConfiguration",
"Properties": {
"FunctionVersion": "2018-05-29",
"ApiId": {
"Ref": "AppSyncApiId"
},
"Name": "IsOrganizationMember",
"DataSourceName": "PermissionsPerOrganizationTable",
"RequestMappingTemplateS3Location": {
"Fn::Sub": [
"s3://${S3DeploymentBucket}/${S3DeploymentRootKey}/resolvers/Query.isOrganizationMember.req.vtl",
{
"S3DeploymentBucket": {
"Ref": "S3DeploymentBucket"
},
"S3DeploymentRootKey": {
"Ref": "S3DeploymentRootKey"
}
}
]
},
"ResponseMappingTemplateS3Location": {
"Fn::Sub": [
"s3://${S3DeploymentBucket}/${S3DeploymentRootKey}/resolvers/Query.isOrganizationMember.res.vtl",
{
"S3DeploymentBucket": {
"Ref": "S3DeploymentBucket"
},
"S3DeploymentRootKey": {
"Ref": "S3DeploymentRootKey"
}
}
]
}
}
},
"OrganizationAccessPipeline": {
"Type": "AWS::AppSync::Resolver",
"Properties": {
"ApiId": {
"Ref": "AppSyncApiId"
},
"TypeName": "Query",
"Kind": "PIPELINE",
"FieldName": "listXData",
"PipelineConfig": {
"Functions": [
{
"Fn::GetAtt": [
"InvokeGetEmailLambdaDataSource",
"FunctionId"
]
},
{
"Fn::GetAtt": [
"IsOrganizationMember",
"FunctionId"
]
}
]
},
"RequestMappingTemplate": "{}",
"ResponseMappingTemplate": "$util.toJson($ctx.result)"
}
}
},
"Conditions": {...},
"Outputs": {...}
}
The lambda is created with the CLI and IsOrganizationMember is a regular VTL which has the user email in the $context.prev.result.
When trying to upload a new theme on my fresh Wordpress install, I get the 413 Request Entity Too Large.
I've read a lot of other questions on StackOverflow and tried these annotations:
nginx.ingress.kubernetes.io/secure-backends: "true"
nginx.ingress.kubernetes.io/proxy-body-size: "100m"
nginx.ingress.kubernetes.io/client-max-body-size: "100m"
nginx.org/client-max-body-size: "100m"
What works:
Uploading files smaller than 1MB.
I guess it has to do with the Bitnami standard nginx configuration. As seen here . But I have no clue on how to add this to my current configuration.
Thanks for helping me out!
**Wordpress Debug Information:**
PHP max input variables 1000
PHP time limit 30
PHP memory limit 512M
Max input time 60
Upload max filesize 40M
PHP post max size 40M
The main WordPress directory Writable
The wp-content directory Writable
The uploads directory Writable
The plugins directory Writable
The themes directory Writable
Deployment Yaml
{
"kind": "Deployment",
"apiVersion": "extensions/v1beta1",
"metadata": {
"name": "wordpressdf99e",
"namespace": "default",
"selfLink": "/apis/extensions/v1beta1/namespaces/default/deployments/wordpressdf99e",
"uid": "f39369f1-6c1f-11ea-8b29-063deb7a2778",
"resourceVersion": "18492542",
"generation": 2,
"creationTimestamp": "2020-03-22T09:31:47Z",
"labels": {
"app.kubernetes.io/instance": "wordpressdf99e",
"app.kubernetes.io/managed-by": "Tiller",
"app.kubernetes.io/name": "wordpress",
"helm.sh/chart": "wordpress-9.0.4"
},
"annotations": {
"deployment.kubernetes.io/revision": "2"
}
},
"spec": {
"replicas": 1,
"selector": {
"matchLabels": {
"app.kubernetes.io/instance": "wordpressdf99e",
"app.kubernetes.io/name": "wordpress"
}
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"app.kubernetes.io/instance": "wordpressdf99e",
"app.kubernetes.io/managed-by": "Tiller",
"app.kubernetes.io/name": "wordpress",
"helm.sh/chart": "wordpress-9.0.4"
},
"annotations": {
"kubectl.kubernetes.io/restartedAt": "2020-03-22T12:23:46+01:00"
}
},
"spec": {
"volumes": [
{
"name": "wordpress-data",
"persistentVolumeClaim": {
"claimName": "wordpressdf99e"
}
}
],
"containers": [
{
"name": "wordpress",
"image": "docker.io/bitnami/wordpress:5.3.2-debian-10-r48",
"ports": [
{
"name": "http",
"containerPort": 8080,
"protocol": "TCP"
},
{
"name": "https",
"containerPort": 8443,
"protocol": "TCP"
}
],
"env": [
{
"name": "ALLOW_EMPTY_PASSWORD",
"value": "yes"
},
{
"name": "MARIADB_HOST",
"value": "wordpressdf99e-mariadb"
},
{
"name": "MARIADB_PORT_NUMBER",
"value": "3306"
},
{
"name": "WORDPRESS_DATABASE_NAME",
"value": "bitnami_wordpress"
},
{
"name": "WORDPRESS_DATABASE_USER",
"value": "bn_wordpress"
},
{
"name": "WORDPRESS_DATABASE_PASSWORD",
"valueFrom": {
"secretKeyRef": {
"name": "wordpressdf99e-mariadb",
"key": "mariadb-password"
}
}
},
{
"name": "WORDPRESS_USERNAME",
"value": "user"
},
{
"name": "WORDPRESS_PASSWORD",
"valueFrom": {
"secretKeyRef": {
"name": "wordpressdf99e",
"key": "wordpress-password"
}
}
},
{
"name": "WORDPRESS_EMAIL",
"value": "user#example.com"
},
{
"name": "WORDPRESS_FIRST_NAME",
"value": "FirstName"
},
{
"name": "WORDPRESS_LAST_NAME",
"value": "LastName"
},
{
"name": "WORDPRESS_HTACCESS_OVERRIDE_NONE",
"value": "no"
},
{
"name": "WORDPRESS_BLOG_NAME",
"value": "User's Blog!"
},
{
"name": "WORDPRESS_SKIP_INSTALL",
"value": "no"
},
{
"name": "WORDPRESS_TABLE_PREFIX",
"value": "wp_"
},
{
"name": "WORDPRESS_SCHEME",
"value": "http"
}
],
"resources": {
"requests": {
"cpu": "300m",
"memory": "512Mi"
}
},
"volumeMounts": [
{
"name": "wordpress-data",
"mountPath": "/bitnami/wordpress",
"subPath": "wordpress"
}
],
"livenessProbe": {
"httpGet": {
"path": "/wp-login.php",
"port": "http",
"scheme": "HTTP"
},
"initialDelaySeconds": 120,
"timeoutSeconds": 5,
"periodSeconds": 10,
"successThreshold": 1,
"failureThreshold": 6
},
"readinessProbe": {
"httpGet": {
"path": "/wp-login.php",
"port": "http",
"scheme": "HTTP"
},
"initialDelaySeconds": 30,
"timeoutSeconds": 5,
"periodSeconds": 10,
"successThreshold": 1,
"failureThreshold": 6
},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "IfNotPresent"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"securityContext": {
"runAsUser": 1001,
"fsGroup": 1001
},
"schedulerName": "default-scheduler",
"hostAliases": [
{
"ip": "127.0.0.1",
"hostnames": [
"status.localhost"
]
}
]
}
},
"strategy": {
"type": "RollingUpdate",
"rollingUpdate": {
"maxUnavailable": "25%",
"maxSurge": "25%"
}
},
"revisionHistoryLimit": 10,
"progressDeadlineSeconds": 600
},
"status": {
"observedGeneration": 2,
"replicas": 1,
"updatedReplicas": 1,
"readyReplicas": 1,
"availableReplicas": 1,
"conditions": [
{
"type": "Progressing",
"status": "True",
"lastUpdateTime": "2020-03-22T11:34:28Z",
"lastTransitionTime": "2020-03-22T09:31:47Z",
"reason": "NewReplicaSetAvailable",
"message": "ReplicaSet \"wordpressdf99e-6bcf574f64\" has successfully progressed."
},
{
"type": "Available",
"status": "True",
"lastUpdateTime": "2020-03-22T12:14:55Z",
"lastTransitionTime": "2020-03-22T12:14:55Z",
"reason": "MinimumReplicasAvailable",
"message": "Deployment has minimum availability."
}
]
}
}
If you were using an external Nginx Ingress Controller you would create a configmap for your ingress controllers and set the proxy-body-size to the size needed.
The default size for it is 1m
There is also a bitnami wordpress nginx image
Where you can set the Server block for it directly and add client_max_body_size
https://github.com/bitnami/bitnami-docker-wordpress-nginx/blob/master/test.yaml#L9
Thanks for your comment strongjz!
The one line of code that was needed to solve this problem was:
ingress.bluemix.net/client-max-body-size: "500m"
Once I've added this to the annotations of my ingress. The issue was solved!
Does anyone have experience in writing Azure Policy for Analysis Services? I am stuck on getting one completed. I am attempting to create policy that enforces what IPs can be added to the public IP side. So far I have this and it does work:
{
"parameters": {
"allowedAddressRanges": {
"type": "Array",
"metadata": {
"displayName": "Address Range",
"description": "The list of allowed external IP address ranges"
}
}
},
"policyRule": {
"if": {
"allOf": [
{
"field": "type",
"equals": "Microsoft.AnalysisServices/servers"
},
{
"not": {
"field": "Microsoft.AnalysisServices/servers/ipV4FirewallSettings.firewallRules[*]",
"in": "[parameters('allowedAddressRanges')]"
}
}
]
},
"then": {
"effect": "audit"
}
}
}
Do I need to go further down the alias path to something like:
"Microsoft.AnalysisServices/servers/ipV4FirewallSettings.firewallRules[*].rangeStart"
This is an old thread but since it hasn't been answered yet, perhaps someone can benefit from my findings. Looking at the aliases available for Azure Analysis Services we can notice the following :
Microsoft.AnalysisServices/servers/ipV4FirewallSettings.firewallRules
Microsoft.AnalysisServices/servers/ipV4FirewallSettings.firewallRules[*]
Microsoft.AnalysisServices/servers/ipV4FirewallSettings.firewallRules[*].firewallRuleName
Microsoft.AnalysisServices/servers/ipV4FirewallSettings.firewallRules[*].rangeStart
Microsoft.AnalysisServices/servers/ipV4FirewallSettings.firewallRules[*].rangeEnd
Based on the notation above, I had to go down until "rangeStart" and "rangeEnd". This is what works for me:
{
"mode": "All",
"policyRule": {
"if": {
"allOf": [
{
"field": "type",
"equals": "Microsoft.AnalysisServices/servers"
},
{
"not": {
"anyOf": [
{
"field": "Microsoft.AnalysisServices/servers/ipV4FirewallSettings.firewallRules[*].rangeStart",
"in": "[parameters('allowedAddressRanges')]"
},
{
"field": "Microsoft.AnalysisServices/servers/ipV4FirewallSettings.firewallRules[*].rangeEnd",
"in": "[parameters('allowedAddressRanges')]"
}
]
}
}
]
},
"then": {
"effect": "[parameters('effect')]"
}
},
"parameters": {
"effect": {
"type": "String",
"metadata": {
"displayName": "Effect",
"description": "The effect determines what happens when the policy rule is evaluated to match"
},
"allowedValues": [
"Audit",
"Deny",
"Disabled"
],
"defaultValue": "Deny"
},
"allowedAddressRanges": {
"type": "Array",
"metadata": {
"displayName": "Address Range",
"description": "The list of allowed IP address ranges"
},
"allowedValues": [
"0.0.0.0",
"0.0.0.0",
"0.0.0.0",
"0.0.0.0",
"0.0.0.0"
],
"defaultValue": [
"0.0.0.0",
"0.0.0.0",
"0.0.0.0",
"0.0.0.0",
"0.0.0.0"
]
}
}
}
reference: https://learn.microsoft.com/en-us/azure/templates/microsoft.analysisservices/servers#IPv4FirewallRule
I have a weird error that has been bugging me for a while now. When I run the server local the site shows up perfect but when I try to access it on my aws EC2 I get the error "ActiveRecord::PendingMigrationError
Migrations are pending. To resolve this issue, run: bin/rake db:migrate RAILS_ENV=development"
So I ran the command "bin/rake db:migrate RAILS_ENV=development" and this prints out.
bin/rake db:migrate RAILS_ENV=development
Running via Spring preloader in process 30908
== 20160812175638 CreatePosts: migrating ======================================
-- create_table(:posts)
-> 0.0014s
== 20160812175638 CreatePosts: migrated (0.0017s) =============================
== 20160813194710 DeviseCreateUsers: migrating ================================
-- create_table(:users)
-> 0.0015s
-- add_index(:users, :email, {:unique=>true})
-> 0.0005s
-- add_index(:users, :reset_password_token, {:unique=>true})
-> 0.0007s
== 20160813194710 DeviseCreateUsers: migrated (0.0031s) =======================
== 20160912112653 AddAttachmentImageToPosts: migrating ========================
-- change_table(:posts)
-> 0.0012s
== 20160912112653 AddAttachmentImageToPosts: migrated (0.0013s) ===============
but I still get the same msg. So I did some searching on the net and I have read all the pages here on this issue and haven't found anything.
When I run "rake db:migrate:status" I see this:
Status Migration ID Migration Name
--------------------------------------------------
up 20160812175638 Create posts
up 20160813194710 Devise create users
up 20160912112653 Add attachment image to posts
And when I do "rake db:version" I get this:
Current version: 20160912112653
"rails -v" gives me
Rails 4.2.6
and "cat Gemfile | grep rails" gives me this:# Bundle edge Rails instead:
gem 'rails', github: 'rails/rails'
gem 'rails'
gem 'sass-rails', '~> 5.0'
gem 'coffee-rails', '~> 4.1.0'
# See https://github.com/rails/execjs#readme for more supported runtimes
gem 'jquery-rails'
# Turbolinks makes following links in your web application faster. Read more: https://github.com/rails/turbolinks
# Build JSON APIs with ease. Read more: https://github.com/rails/jbuilder
# bundle exec rake doc:rails generates the API under doc/api.
# gem 'capistrano-rails', group: :development
# Spring speeds up development by keeping your application running in the background. Read more: https://github.com/rails/spring
I'm at a loss here; it works just fine on local, and it worked just fine if I edit away /data/ from the database.yml file.
default: &default
adapter: sqlite3
pool: 5
timeout: 5000
development:
<<: *default
database: db/data/development.sqlite3
test:
<<: *default
database: db/test.sqlite3
production:
<<: *default
database: db/data/production.sqlite3
But if I do that the database gets wiped as soon as I update the site with my redeploy script which is running docker-compose for me.
I quite sure that I've missed a simple thing that gives me this error tho since the site do work on the localhost.
//Emmoth
I think I know what the problem is now, but I don't know how to fix it though.
When I run docker volume ls it doesn't find any volume at all.
here is the output from docker inspect
[
{
"Id": "4b72d9a721f086706f53f08a88b5f32c959f6a28ec37d3396c14922c7750bc48",
"Created": "2016-10-09T16:48:24.678289811Z",
"Path": "rails",
"Args": [
"server",
"-b",
"0.0.0.0"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 15188,
"ExitCode": 0,
"Error": "",
"StartedAt": "2016-10-09T16:48:24.925986705Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:c8ccdcb3602d581c06264db2b24f99cd4f936713ca84c614c056743b8d89d459",
"ResolvConfPath": "/var/lib/docker/containers/4b72d9a721f086706f53f08a88b5f32c959f6a28ec37d3396c14922c7750bc48/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/4b72d9a721f086706f53f08a88b5f32c959f6a28ec37d3396c14922c7750bc48/hostname",
"HostsPath": "/var/lib/docker/containers/4b72d9a721f086706f53f08a88b5f32c959f6a28ec37d3396c14922c7750bc48/hosts",
"LogPath": "/var/lib/docker/containers/4b72d9a721f086706f53f08a88b5f32c959f6a28ec37d3396c14922c7750bc48/4b72d9a721f086706f53f08a88b5f32c959f6a28ec37d3396c14922c7750bc48-json.log",
"Name": "/juridik_app_1",
"RestartCount": 0,
"Driver": "aufs",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/storage/db:/usr/src/app/db/data:rw"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "default",
"PortBindings": {
"3000/tcp": [
{
"HostIp": "",
"HostPort": "80"
}
]
},
"RestartPolicy": {
"Name": "always",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": [],
"CapAdd": null,
"CapDrop": null,
"Dns": null,
"DnsOptions": null,
"DnsSearch": null,
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": false,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": null,
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"ConsoleSize": [
0,
0
],
"Isolation": "",
"CpuShares": 0,
"Memory": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": null,
"BlkioDeviceReadBps": null,
"BlkioDeviceWriteBps": null,
"BlkioDeviceReadIOps": null,
"BlkioDeviceWriteIOps": null,
"CpuPeriod": 0,
"CpuQuota": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": null,
"DiskQuota": 0,
"KernelMemory": 0,
"MemoryReservation": 0,
"MemorySwap": 0,
"MemorySwappiness": -1,
"OomKillDisable": false,
"PidsLimit": 0,
"Ulimits": null,
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0
},
"GraphDriver": {
"Name": "aufs",
"Data": null
},
"Mounts": [
{
"Source": "/storage/db",
"Destination": "/usr/src/app/db/data",
"Mode": "rw",
"RW": true,
"Propagation": "rprivate"
}
],
"Config": {
"Hostname": "4b72d9a721f0",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"3000/tcp": {}
},
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"S3_BUCKET_REGION=eu-europe-1",
"S3_SECRET_KEY=*************************",
"S3_BUCKET=juridik-assets",
"S3_KEY_ID=*********************",
"PATH=/usr/local/bundle/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"RUBY_MAJOR=2.3",
"RUBY_VERSION=2.3.1",
"RUBY_DOWNLOAD_SHA256=b87c738cb2032bf4920fef8e3864dc5cf8eae9d89d8d523ce0236945c5797dcd",
"RUBYGEMS_VERSION=2.6.6",
"BUNDLER_VERSION=1.13.1",
"GEM_HOME=/usr/local/bundle",
"BUNDLE_PATH=/usr/local/bundle",
"BUNDLE_BIN=/usr/local/bundle/bin",
"BUNDLE_SILENCE_ROOT_WARNING=1",
"BUNDLE_APP_CONFIG=/usr/local/bundle"
],
"Cmd": [
"rails",
"server",
"-b",
"0.0.0.0"
],
"Image": "juridik_app",
"Volumes": {
"/usr/src/app/db/data": {}
},
"WorkingDir": "/usr/src/app",
"Entrypoint": null,
"OnBuild": null,
"Labels": {
"com.docker.compose.config-hash": "6c31573cdfdba0b4c056f7af83e652b728c4a56083a6c7eabd8634b2dbfccb47",
"com.docker.compose.container-number": "1",
"com.docker.compose.oneoff": "False",
"com.docker.compose.project": "juridik",
"com.docker.compose.service": "app",
"com.docker.compose.version": "1.8.0"
}
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "de9a9bfc8196ee65d89e09dc0a41acbb1f28830229d4e53fe057832a5419a778",
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"Ports": {
"3000/tcp": [
{
"HostIp": "0.0.0.0",
"HostPort": "80"
}
]
},
"SandboxKey": "/var/run/docker/netns/de9a9bfc8196",
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "ee42a322ee7de01529408d782a93556333cf9e368934ccfe03e98c1223ae44e3",
"Gateway": "172.17.0.1",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "172.17.0.2",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"MacAddress": "02:42:ac:11:00:02",
"Networks": {
"bridge": {
"IPAMConfig": null,
"Links": null,
"Aliases": null,
"NetworkID": "16145185c61bbe11c565fbbb974aefaa79db796feb80eccb98765fc67bed8158",
"EndpointID": "ee42a322ee7de01529408d782a93556333cf9e368934ccfe03e98c1223ae44e3",
"Gateway": "172.17.0.1",
"IPAddress": "172.17.0.2",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:ac:11:00:02"
}
}
}
}
]
After searching the net for like 3 days for an answer I got one from a friend of mine.
He told me get rid of the big red error message "pending migrations"
I had to do
docker-compose exec app bash
and inside that do
rake db:migrate
and then
exit
Which I did, and then my problems were gone. So it works now as it should. Hope that someone else can benefit from this too.