ProgrammingError Thread error in SQLAlchemy - sqlite

I have two simple tables in a SQLite db.
from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey, \
    create_engine, String
from sqlalchemy.orm import mapper, relationship, sessionmaker, scoped_session
from sqlalchemy.ext.declarative import declarative_base

engine = create_engine('sqlite:///dir_graph.sqlite', echo=True)
session_factory = sessionmaker(bind=engine)
Session = scoped_session(session_factory)
session = Session()
Base = declarative_base()

class NodeType(Base):
    __tablename__ = 'nodetype'
    id = Column(Integer, primary_key=True)
    name = Column(String(20), unique=True)
    nodes = relationship('Node', backref='nodetype')

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return "Nodetype: %s" % self.name

class Node(Base):
    __tablename__ = 'node'
    id = Column(Integer, primary_key=True)
    name = Column(String(20), unique=True)
    type_id = Column(Integer, ForeignKey('nodetype.id'))

    def __init__(self, _name, _type_id):
        self.name = _name
        self.type_id = _type_id

Base.metadata.create_all(engine)
After running this I interact with the interpreter to learn about SQLAlchemy, e.g. n1 = Node('Node1', 1). After I do a session.commit() and then try another statement, e.g. n2 = Node('n2', 1), I get this error:
sqlalchemy.exc.ProgrammingError: (ProgrammingError) SQLite objects created in a thread can only be used in that same thread.The object was created in thread id 3932 and this is thread id 5740 None None.
How can I continue using a session after I have done a commit?
Thanks.

SQLite by default prohibits using a single connection in more than one thread.
Just add the connect_args={'check_same_thread': False} parameter to your create_engine() call, like this:
engine = create_engine('sqlite:///dir_graph.sqlite', connect_args={'check_same_thread': False}, echo=True)
According to sqlite3.connect:
By default, check_same_thread is True and only the creating thread may
use the connection. If set False, the returned connection may be
shared across multiple threads. When using multiple threads with the
same connection writing operations should be serialized by the user to
avoid data corruption.
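For the interactive workflow in the question, a minimal sketch with the adjusted engine (class and file names taken from the question) would then let the same session keep working across commits:

# Minimal sketch: the same scoped session keeps working after commit()
# once check_same_thread=False is passed to the sqlite3 driver.
engine = create_engine('sqlite:///dir_graph.sqlite',
                       connect_args={'check_same_thread': False},
                       echo=True)
Session = scoped_session(sessionmaker(bind=engine))
session = Session()

n1 = Node('Node1', 1)
session.add(n1)
session.commit()

n2 = Node('n2', 1)   # previously raised ProgrammingError at this point
session.add(n2)
session.commit()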

Related

How to get SQLAlchemy to create a class from a view instead of a table?

I am using flask-sqlalchemy and I want to create a class from a view instead of a db table. Is there an alternative to __tablename__? 'Car' was recently changed from a table to a view and now requests against it get stuck.
class car(db.Model):
    __tablename__ = 'car'
    model = Column(Text, primary_key=True)
    brand = Column(Text, primary_key=True)
    condition = Column(Text, primary_key=True)
    year = Column(Integer)
SQLAlchemy does not have a particular problem with ORM objects based on views. For example, this works fine with SQL Server because SQL Server allows DML (INSERT, UPDATE, DELETE) on views:
# set up test environment
with engine.begin() as conn:
    conn.exec_driver_sql("DROP TABLE IF EXISTS car_table")
    conn.exec_driver_sql("CREATE TABLE car_table (id integer primary key, make varchar(50))")
    conn.exec_driver_sql("INSERT INTO car_table (id, make) VALUES (1, 'Audi'), (2, 'Buick')")
    conn.exec_driver_sql("DROP VIEW IF EXISTS car_view")
    conn.exec_driver_sql("CREATE VIEW car_view AS SELECT * FROM car_table WHERE id <> 2")

Base = sa.orm.declarative_base()

class Car(Base):
    __tablename__ = "car_view"
    id = Column(Integer, primary_key=True, autoincrement=False)
    make = Column(String(50), nullable=False)

    def __repr__(self):
        return f"<Car(id={self.id}, make='{self.make}')>"

with Session(engine) as session:
    print(session.execute(select(Car)).all())
    # [(<Car(id=1, make='Audi')>,)]
    # (note: the view excludes the row (object) where id == 2)
    session.add(Car(id=3, make="Chevrolet"))
    session.commit()
    print(session.execute(select(Car)).all())
    # [(<Car(id=1, make='Audi')>,), (<Car(id=3, make='Chevrolet')>,)]
However, if you really are using SQLite then you won't be able to add, update, or delete objects using a class based on a view because SQLite doesn't allow that:
sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) cannot modify car_view because it is a view
[SQL: INSERT INTO car_view (id, make) VALUES (?, ?)]
[parameters: (3, 'Chevrolet')]
(Background on this error at: https://sqlalche.me/e/14/e3q8)
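Translating that back to the Flask-SQLAlchemy setup in the question, a read-only mapping against the existing car view might look like the sketch below. Treat it as a sketch: db is assumed to be the app's existing SQLAlchemy() instance, and the composite primary key only exists to give the ORM a row identity, since a view has no real primary key:

# Rough sketch: adapting the answer back to the Flask-SQLAlchemy model
# from the question; the view is mapped exactly like a table and used
# read-only, because SQLite views reject INSERT/UPDATE/DELETE.
class Car(db.Model):
    __tablename__ = 'car'  # the view name works here just like a table name
    model = Column(Text, primary_key=True)
    brand = Column(Text, primary_key=True)
    condition = Column(Text, primary_key=True)
    year = Column(Integer)

cars = Car.query.all()  # reads work; writes would raise OperationalError on SQLite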

Bulk Insert and Returning IDs using sqlite

I understand that SQLite doesn't support RETURNING, or at least that is what SQLAlchemy is telling me:
sqlalchemy.exc.CompileError: RETURNING is not supported by this dialect's statement compiler.
I get this error when using SQLAlchemy's Core library. Here is a code example:
from sqlalchemy.engine.url import URL
from sqlalchemy import create_engine, MetaData
from sqlalchemy import Table, Column, Integer, String

engine = create_engine('sqlite:///:memory:', echo=False)

# create table
meta = MetaData(engine)
table = Table('userinfo', meta,
              Column('id', Integer, primary_key=True),
              Column('first_name', String),
              Column('age', Integer),
              )
meta.create_all()

# generate rows
data = [{'first_name': f'Name {i}', 'age': 18+i} for i in range(10)]

# this seems to work on PostgreSQL only
stmt = table.insert().values(data).returning(table.c.id)
for rowid in engine.execute(stmt).fetchall():
    print(rowid['id'])
Now, when I use similar code with SQLAlchemy's ORM library, the IDs are returned. Here is the source code:
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
from sqlalchemy import ForeignKey
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.orm import relationship

Base = declarative_base()

class UserInfo(Base):
    __tablename__ = "userinfo"
    id = Column(Integer, primary_key=True)
    first_name = Column(String)
    age = Column(Integer)

engine = create_engine('sqlite:///:memory:', echo=False)
Base.metadata.create_all(engine)
session = scoped_session(sessionmaker(bind=engine))

data = [dict(first_name=f'Name {i}', age=18+i) for i in range(10)]
session.bulk_insert_mappings(UserInfo, data, return_defaults=True)
session.commit()
print([s['id'] for s in data])
How come this is working while the Core version is not? When I look at the generated SQL I don't see RETURNING being used.
After some digging I found this link.
In that document, bulk_insert_mappings is described as just "Batched INSERT statements via the ORM 'bulk', using dictionaries". When return_defaults=True is set, I assume SQLAlchemy repeatedly fetches the last row id after each INSERT, which is why the IDs are available.
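That matches what happens under the hood: without RETURNING, SQLAlchemy fetches the primary key of each single-row INSERT from the cursor (lastrowid on SQLite). If you want the IDs back with Core on SQLite, one option is to insert the rows one at a time and read inserted_primary_key from each result. A minimal sketch reusing the table and data from the Core example above:

# Sketch: per-row Core INSERTs so each result exposes the generated id
# via inserted_primary_key (backed by the sqlite3 cursor's lastrowid).
ids = []
with engine.begin() as conn:
    for row in data:
        result = conn.execute(table.insert().values(**row))
        ids.append(result.inserted_primary_key[0])
print(ids)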

Adding a unique constraint to a sqlite3 table with Flask-Migrate fails with IntegrityError

So I am using SQLite as my test database and have the following classes in my models.py:
class User(UserMixin, db.Model):
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True, index=True)
    username = db.Column(db.String(40), unique=True, index=True)
    password_hash = db.Column(db.String(256))
    alternate_id = db.Column(db.String(100))
    posts = db.relationship('Posts', backref='author', lazy=True)

    def get_id(self):
        return str(self.alternate_id)

    def __init__(self, username, password):
        self.username = username
        self.password_hash = generate_password_hash(password)
        self.alternate_id = my_serializer.dumps(
            self.username + self.password_hash)

    def verify_password(self, password):
        if check_password_hash(self.password_hash, password):
            return "True"

class Posts(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100), nullable=False, unique=True)
    description = db.Column(db.String(1500))
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))

    def __init__(self, title, description, author_id):
        self.title = title
        self.description = description
        self.author_id = author_id
I added the unique key constraint to column title in my Posts class and then was trying to update the schema using Flask-Migrate.
Initially I was getting the "No support for ALTER of constraints in SQLite dialect" error, since sqlite3 does not support this through Alembic. So I looked at the Alembic documentation and found that you can actually do such migrations using batch mode migrations. So I updated my migration script as below.
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("posts") as batch_op:
        batch_op.create_unique_constraint('unique_title', ['title'])
    # ### end Alembic commands ###
Now when I try to run flask db upgrade I get the following error
sqlalchemy.exc.IntegrityError: (sqlite3.IntegrityError) UNIQUE constraint failed: _alembic_tmp_posts.title
[SQL: 'INSERT INTO _alembic_tmp_posts (id, title, description, author_id) SELECT posts.id, posts.title, posts.description, posts.author_id \nFROM posts']
(Background on this error at: http://sqlalche.me/e/gkpj)
I am not able to understand why an IntegrityError exception is being thrown, because if I look at the insert statement the number of columns is the same.
Does it have something to do with the author_id column having a foreign key constraint on it?
The database table column on which I was adding the unique constraint had duplicate data, and that was the reason I was getting the integrity error; I am just surprised I didn't notice that earlier.
Once I removed one of the duplicate rows, the database upgrade was successful.
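For anyone hitting the same error, a quick way to spot the offending rows before running the migration is to group by the column that is about to become unique. A rough sketch using the sqlite3 module directly (the file name app.db is an assumption; point it at whatever your SQLALCHEMY_DATABASE_URI references):

import sqlite3

# 'app.db' is a placeholder for the actual SQLite file used by the app.
conn = sqlite3.connect('app.db')
duplicates = conn.execute(
    "SELECT title, COUNT(*) FROM posts GROUP BY title HAVING COUNT(*) > 1"
).fetchall()
print(duplicates)  # (title, count) pairs that would violate the new UNIQUE constraint
conn.close()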

Migrate Sqlalchemy schema from mssql to sqlite db?

I have all my table classes written for MSSQL, but now I want to test my application locally, so I need a SQLite db. Is there a way I can replicate my database in SQLite?
I am facing some issues, e.g. SQLite does not support Float as a primary key. I have more than 200 tables, so I cannot go and edit them all just for testing. I can have all the tables in one MetaData.
My idea is to use SQLite just for testing; for production I will still be using MSSQL.
Note: I changed Float to Integer but my tables are still not created; instead it just creates an empty db.
My code:
for table in metadata.tables:
    keys_to_change = []
    for pkey_column in metadata.tables[table].primary_key.columns.keys():
        keys_to_change.append(pkey_column)
    for data in list(metadata.tables[table].foreign_keys):
        keys_to_change.append(data.column.name)
    for column in metadata.tables[table].columns:
        if column.name in keys_to_change:
            if str(column.type) == 'FLOAT':
                column.type = INTEGER

engine = create_engine('sqlite:///mytest.db', echo=True, echo_pool=True)
metadata.create_all(engine)
If you are able to change the model code, I would suggest creating an alias for Float and using it to define the primary_key and ForeignKey columns, which you could then just switch for your SQLite testing:
# CONFIGURATION
PKType = Float # default: MSSQL; or Float(N, M)
# PKType = Integer # uncomment this for sqlite
and your model becomes like below:
class MyParent(Base):
    __tablename__ = 'my_parent'
    id = Column(PKType, primary_key=True)
    name = Column(String)
    children = relationship('MyChild', backref='parent')

class MyChild(Base):
    __tablename__ = 'my_child'
    id = Column(PKType, primary_key=True)
    parent_id = Column(PKType, ForeignKey('my_parent.id'))
    name = Column(String)
Alternatively, if you would like to change only the engine and not another configuration variable, you can use dialect-specific custom type handling:
import sqlalchemy.types as types

class PKType(types.TypeDecorator):
    impl = Float

    def load_dialect_impl(self, dialect):
        if dialect.name == 'sqlite':
            return dialect.type_descriptor(Integer())
        else:
            return dialect.type_descriptor(Float())
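With the TypeDecorator in place, the models above can keep using PKType unchanged; the DDL emitted by create_all() should then come out as INTEGER on SQLite and FLOAT on MSSQL. A rough sanity check, assuming Base and the models from the previous snippet:

# Sketch: the same metadata produces dialect-appropriate column types,
# because load_dialect_impl() picks the implementation per dialect.
sqlite_engine = create_engine('sqlite:///mytest.db', echo=True)
Base.metadata.create_all(sqlite_engine)   # id columns rendered as INTEGER

# mssql_engine = create_engine('mssql+pyodbc://...')   # hypothetical DSN
# Base.metadata.create_all(mssql_engine)  # id columns rendered as FLOAT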

How to use sqlalchemy to select data from a database?

I have two sqlalchemy scripts, one that creates a database and a few tables and another that selects data from them.
create_database.py
from sqlalchemy import create_engine, Table, Column, Integer, String, MetaData, ForeignKey, select

engine = create_engine('sqlite:///test.db', echo=True)
metadata = MetaData()

addresses = Table('addresses', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('user_id', None, ForeignKey('users.id')),
                  Column('email_addresses', String, nullable=False)
                  )

users = Table('users', metadata,
              Column('id', Integer, primary_key=True),
              Column('name', String),
              Column('fullname', String),
              )

metadata.create_all(engine)
select.py
from sqlalchemy import create_engine, select
engine = create_engine('sqlite:///test.db', echo=True)
conn = engine.connect()
s = select([users])
result = conn.execute(s)
I am able to run the create_database.py script but when I run the select.py script I get the following error
$ python select.py
Traceback (most recent call last):
  File "select.py", line 5, in <module>
    s = select([users])
NameError: name 'users' is not defined
I am able to run the select statement from within create_database.py by appending the following to it:
conn = engine.connect()
s = select([users])
result = conn.execute(s)
How can I run the select statements from a script separate from create_database.py?
The script select.py does not see users and addresses defined in create_database.py. Import them in select.py before using them.
In select.py:
from create_database import users, addresses
## Do something with users and addresses
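Putting that together, a complete select.py along these lines might look like the sketch below. Note that importing from create_database runs that module, so metadata.create_all(engine) executes again; with SQLite this is harmless because create_all only creates tables that don't already exist:

from sqlalchemy import create_engine, select
from create_database import users, addresses  # importing runs create_database.py

engine = create_engine('sqlite:///test.db', echo=True)
conn = engine.connect()

s = select([users])          # 1.x-style select(), matching the question's code
result = conn.execute(s)
for row in result:
    print(row)
conn.close()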