with Session(engine) as session:
    new_customers = [
        Customer(name="Alice Johnson", email="alice@example.com"),
        Customer(name="Bob Williams", email="bob@example.com"),
        Customer(name="Charlie Brown", email="charlie@example.com"),
    ]
    session.add_all(new_customers)
    session.commit()

    # After commit, every object carries its database-generated ID.
    for customer in new_customers:
        print(f"{customer.name}: ID {customer.id}")
Objects added to a session automatically participate in the current transaction. Pending changes are flushed to the database automatically before each query (autoflush) and when you commit.
with Session(engine) as session:
    # Build the item catalog first.
    catalog = [
        Item(name="T-Shirt", price=19.99),
        Item(name="Mug", price=12.50),
        Item(name="Hat", price=24.99),
    ]
    session.add_all(catalog)
    # Flush issues the INSERTs so the items get IDs, without committing.
    session.flush()

    # A customer with a nested order; related objects cascade on add().
    customer = Customer(
        name="Jane Doe",
        email="jane@example.com",
        orders=[
            Order(
                total_amount=57.48,
                items=[
                    OrderItem(item=catalog[0], quantity=2, price_paid=19.99),
                    OrderItem(item=catalog[1], quantity=1, price_paid=12.50),
                    OrderItem(item=catalog[2], quantity=1, price_paid=24.99),
                ],
            )
        ],
    )
    session.add(customer)
    session.commit()
    print(f"Created customer {customer.id} with {len(customer.orders)} order(s)")
When you add an object with relationships to a session, related objects are automatically added too (cascading). The cascade behavior is controlled by the cascade parameter on relationship().
For performance-critical bulk inserts, use Core-style insert:
from sqlalchemy import insert

with Session(engine) as session:
    # Core-style bulk insert: a single executemany, no per-object
    # unit-of-work bookkeeping.
    session.execute(
        insert(Customer),
        [
            {"name": f"Customer {i}", "email": f"customer{i}@example.com"}
            for i in range(1000)
        ],
    )
    session.commit()
Bulk inserts bypass ORM features like relationship cascading and Python-side defaults. Use them when you need raw performance for simple inserts.
with Session(engine) as session:
    # Load the customer whose orders we want to modify.
    query = select(Customer).where(Customer.id == 1)
    target = session.scalars(query).one()

    # Appending to the relationship collection schedules an INSERT.
    target.orders.append(Order(total_amount=99.99))

    # Mutating loaded objects schedules UPDATEs for the changed rows.
    for existing in target.orders:
        if existing.total_amount > 100:
            existing.total_amount *= 0.9  # Apply discount

    session.commit()
with Session(engine) as session:
    # Fetch the customer; related orders/items load on attribute access.
    query = select(Customer).where(Customer.id == 2)
    customer = session.scalars(query).one()

    print(f"Customer has {len(customer.orders)} orders")
    for order in customer.orders:
        print(f" Order {order.id} has {len(order.items)} items")

    # Deleting the parent cascades to orders and order_items.
    session.delete(customer)
    session.commit()
    print("Customer and all related orders deleted!")
from sqlalchemy import delete

with Session(engine) as session:
    from datetime import datetime, timedelta

    # Rows older than 90 days are eligible for removal.
    cutoff = datetime.now() - timedelta(days=90)

    # Bulk DELETE runs directly in SQL; no objects are loaded.
    result = session.execute(delete(Order).where(Order.order_date < cutoff))
    print(f"Deleted {result.rowcount} old orders")
    session.commit()
Bulk delete operations bypass ORM cascade logic. If you need cascading deletes, load the objects first and use session.delete(), or configure ON DELETE CASCADE on the foreign key at the database level.
Removing items from a collection deletes them when using delete-orphan:
with Session(engine) as session:
    order = session.scalars(select(Order).where(Order.id == 1)).one()

    # Detaching a child from the collection makes it an orphan under
    # delete-orphan cascade, so it is DELETEd on flush.
    orphan = order.items.pop(0)
    session.commit()  # The OrderItem is deleted from database
with Session(engine) as session:
    # 1. A pending object: tracked by the session, not yet in the database.
    customer = Customer(name="Test User", email="test@example.com")
    session.add(customer)
    print(f"Customer ID before flush: {customer.id}")  # None

    # 2. flush() emits the INSERT inside the still-open transaction.
    session.flush()
    print(f"Customer ID after flush: {customer.id}")  # Has ID now

    # 3. A query in the same transaction returns the identity-mapped object.
    found = session.scalars(
        select(Customer).where(Customer.id == customer.id)
    ).one()
    assert found is customer  # Same object!

    # 4. Further changes stay pending until the next flush/commit.
    customer.email = "updated@example.com"

    # 5. commit() flushes any remaining changes and ends the transaction.
    session.commit()
Use flush() when you need database-generated values (like IDs) but want to continue working in the same transaction. Use commit() to finalize all changes.
with Session(engine) as session:
    CHUNK = 1000
    # Insert 10,000 rows in chunks, flushing after each chunk so the
    # pending queue stays small; one commit finalizes everything.
    for start in range(0, 10000, CHUNK):
        batch = [
            Customer(
                name=f"Customer {j}",
                email=f"customer{j}@example.com",
            )
            for j in range(start, start + CHUNK)
        ]
        session.add_all(batch)
        session.flush()
    session.commit()  # Single commit at the end
from sqlalchemy.orm import sessionmaker

# A session factory that does NOT expire attributes on commit, so ORM
# objects remain readable after the transaction ends.
SessionLocal = sessionmaker(engine, expire_on_commit=False)

with SessionLocal() as session:
    customer = Customer(name="Alice", email="alice@example.com")
    session.add(customer)
    session.commit()
    # Attribute access does not trigger a refresh query.
    print(customer.name)  # No SELECT issued
Setting expire_on_commit=False keeps objects accessible after commit without re-querying, but be aware they may become stale if modified elsewhere.