.. dialect:: mssql
    :name: Microsoft SQL Server
    :full_support: 2017
    :normal_support: 2012+
    :best_effort: 2005+

.. _mssql_external_dialects:

External Dialects
-----------------

In addition to the above DBAPI layers with native SQLAlchemy support, there
are third-party dialects for other DBAPI layers that are compatible
with SQL Server. See the "External Dialects" list on the
:ref:`dialect_toplevel` page.

.. _mssql_identity:

Auto Increment Behavior / IDENTITY Columns
------------------------------------------

SQL Server provides so-called "auto incrementing" behavior using the
``IDENTITY`` construct, which can be placed on any single integer column in a
table.  SQLAlchemy considers ``IDENTITY`` within its default "autoincrement"
behavior for an integer primary key column, described at
:paramref:`_schema.Column.autoincrement`.  This means that by default,
the first integer primary key column in a :class:`_schema.Table` will be
considered to be the identity column - unless it is associated with a
:class:`.Sequence` - and will generate DDL as such::

    from sqlalchemy import Table, MetaData, Column, Integer

    m = MetaData()
    t = Table('t', m,
            Column('id', Integer, primary_key=True),
            Column('x', Integer))
    m.create_all(engine)

The above example will generate DDL as:

.. sourcecode:: sql

    CREATE TABLE t (
        id INTEGER NOT NULL IDENTITY,
        x INTEGER NULL,
        PRIMARY KEY (id)
    )

For the case where this default generation of ``IDENTITY`` is not desired,
specify ``False`` for the :paramref:`_schema.Column.autoincrement` flag,
on the first integer primary key column::

    m = MetaData()
    t = Table('t', m,
            Column('id', Integer, primary_key=True, autoincrement=False),
            Column('x', Integer))
    m.create_all(engine)

To add the ``IDENTITY`` keyword to a non-primary key column, specify
``True`` for the :paramref:`_schema.Column.autoincrement` flag on the desired
:class:`_schema.Column` object, and ensure that
:paramref:`_schema.Column.autoincrement` is set to ``False`` on any integer
primary key column::

    m = MetaData()
    t = Table('t', m,
            Column('id', Integer, primary_key=True, autoincrement=False),
            Column('x', Integer, autoincrement=True))
    m.create_all(engine)

.. versionchanged:: 1.4   Added :class:`_schema.Identity` construct
   in a :class:`_schema.Column` to specify the start and increment
   parameters of an IDENTITY. These replace
   the use of the :class:`.Sequence` object in order to specify these values.
.. deprecated:: 1.4

   The ``mssql_identity_start`` and ``mssql_identity_increment`` parameters
   to :class:`_schema.Column` are deprecated and should be replaced by an
   :class:`_schema.Identity` object. Specifying both ways of configuring
   an IDENTITY will result in a compile error.
   These options are also no longer returned as part of the
   ``dialect_options`` key in :meth:`_reflection.Inspector.get_columns`.
   Use the information in the ``identity`` key instead.

.. deprecated:: 1.3

   The use of :class:`.Sequence` to specify IDENTITY characteristics is
   deprecated and will be removed in a future release.  Please use
   the :class:`_schema.Identity` object parameters
   :paramref:`_schema.Identity.start` and
   :paramref:`_schema.Identity.increment`.

.. versionchanged:: 1.4   Removed the ability to use a :class:`.Sequence`
   object to modify IDENTITY characteristics. :class:`.Sequence` objects
   now only manipulate true T-SQL SEQUENCE types.

.. note::

    There can only be one IDENTITY column on the table.  When using
    ``autoincrement=True`` to enable the IDENTITY keyword, SQLAlchemy does not
    guard against multiple columns specifying the option simultaneously.  The
    SQL Server database will instead reject the ``CREATE TABLE`` statement.

.. note::

    An INSERT statement which attempts to provide a value for a column that is
    marked with IDENTITY will be rejected by SQL Server.  In order for the
    value to be accepted, a session-level option "SET IDENTITY_INSERT" must be
    enabled.  The SQLAlchemy SQL Server dialect will perform this operation
    automatically when using a core :class:`_expression.Insert` construct; if
    the execution specifies a value for the IDENTITY column, the
    "IDENTITY_INSERT" option will be enabled for the span of that statement's
    invocation.  However, this scenario is not high performing and should not
    be relied upon for normal use.  If a table doesn't actually require
    IDENTITY behavior in its integer primary key column, the keyword should be
    disabled when creating the table by ensuring that ``autoincrement=False``
    is set.

Controlling "Start" and "Increment"
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Specific control over the "start" and "increment" values for
the ``IDENTITY`` generator are provided using the
:paramref:`_schema.Identity.start` and :paramref:`_schema.Identity.increment`
parameters passed to the :class:`_schema.Identity` object::

    from sqlalchemy import Table, Integer, Column, Identity

    test = Table(
        'test', metadata,
        Column(
            'id',
            Integer,
            Identity(start=100, increment=10),
            primary_key=True
        ),
        Column('name', String(20))
    )

The CREATE TABLE for the above :class:`_schema.Table` object would be:

.. sourcecode:: sql

   CREATE TABLE test (
     id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY,
     name VARCHAR(20) NULL
   )

.. note::

   The :class:`_schema.Identity` object supports many other parameters in
   addition to ``start`` and ``increment``.  These are not supported by
   SQL Server and will be ignored when generating the CREATE TABLE DDL.

.. versionchanged:: 1.3.19  The :class:`_schema.Identity` object is
   now used to affect the
   ``IDENTITY`` generator for a :class:`_schema.Column` under SQL Server.
   Previously, the :class:`.Sequence` object was used.  As SQL Server now
   supports real sequences as a separate construct, :class:`.Sequence` will be
   functional in the normal way starting from SQLAlchemy version 1.4.

INSERT behavior
^^^^^^^^^^^^^^^^

Handling of the ``IDENTITY`` column at INSERT time involves two key
techniques.
The most common is being able to fetch the "last inserted value" for a given
``IDENTITY`` column, a process which SQLAlchemy performs implicitly in many
cases, most importantly within the ORM.

The process for fetching this value has several variants:

* In the vast majority of cases, RETURNING is used in conjunction with INSERT
  statements on SQL Server in order to get newly generated primary key values:

  .. sourcecode:: sql

     INSERT INTO t (x) OUTPUT inserted.id VALUES (?)

* When RETURNING is not available or has been disabled via
  ``implicit_returning=False``, either the ``scope_identity()`` function or
  the ``@@identity`` variable is used; behavior varies by backend:

  * when using PyODBC, the phrase ``; select scope_identity()`` will be
    appended to the end of the INSERT statement; a second result set will be
    fetched in order to receive the value.  Given a table as::

        t = Table('t', m, Column('id', Integer, primary_key=True),
                Column('x', Integer),
                implicit_returning=False)

    an INSERT will look like:

    .. sourcecode:: sql

        INSERT INTO t (x) VALUES (?); select scope_identity()

  * Other dialects such as pymssql will call upon
    ``SELECT scope_identity() AS lastrowid`` subsequent to an INSERT
    statement.  If the flag ``use_scope_identity=False`` is passed to
    :func:`_sa.create_engine`,
    the statement ``SELECT @@identity AS lastrowid`` is used instead.

A table that contains an ``IDENTITY`` column will prohibit an INSERT statement
that refers to the identity column explicitly.  The SQLAlchemy dialect will
detect when an INSERT construct, created using a core
:func:`_expression.insert`
construct (not a plain string SQL), refers to the identity column, and
in this case will emit ``SET IDENTITY_INSERT ON`` prior to the insert
statement proceeding, and ``SET IDENTITY_INSERT OFF`` subsequent to the
execution.  Given this example::

    m = MetaData()
    t = Table('t', m, Column('id', Integer, primary_key=True),
                    Column('x', Integer))
    m.create_all(engine)

    with engine.begin() as conn:
        conn.execute(t.insert(), {'id': 1, 'x':1}, {'id':2, 'x':2})

The above column will be created with IDENTITY, however the INSERT statement
we emit is specifying explicit values.  In the echo output we can see
how SQLAlchemy handles this:

.. sourcecode:: sql

    CREATE TABLE t (
        id INTEGER NOT NULL IDENTITY(1,1),
        x INTEGER NULL,
        PRIMARY KEY (id)
    )

    COMMIT
    SET IDENTITY_INSERT t ON
    INSERT INTO t (id, x) VALUES (?, ?)
    ((1, 1), (2, 2))
    SET IDENTITY_INSERT t OFF
    COMMIT

This is an auxiliary use case suitable for testing and bulk insert scenarios.

SEQUENCE support
----------------

The :class:`.Sequence` object now creates "real" sequences, i.e.,
``CREATE SEQUENCE``.  To provide compatibility with other dialects,
:class:`.Sequence` defaults to a start value of 1, even though the
T-SQL default is -9223372036854775808.

.. versionadded:: 1.4.0
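For example, a start value other than the default of 1 may be given to the
:class:`.Sequence` directly.  The following is a minimal sketch only; the
table, sequence, and ``engine`` names are illustrative and not part of the
dialect itself::

    from sqlalchemy import Column, Integer, MetaData, Sequence, Table

    metadata = MetaData()

    # associating a Sequence with the primary key column means the column
    # will not use IDENTITY; values come from "NEXT VALUE FOR order_id_seq"
    order_table = Table(
        "orders",
        metadata,
        Column(
            "id",
            Integer,
            Sequence("order_id_seq", start=1),
            primary_key=True,
        ),
    )

    metadata.create_all(engine)  # emits CREATE SEQUENCE, then CREATE TABLE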
MAX on VARCHAR / NVARCHAR
-------------------------

SQL Server supports the special string "MAX" within the
:class:`_types.VARCHAR` and :class:`_types.NVARCHAR` datatypes,
to indicate "maximum length possible".  The dialect currently handles this as
a length of "None" in the base type, rather than supplying a
dialect-specific version of these types, so that a base type
specified such as ``VARCHAR(None)`` can assume "unlengthed" behavior on
more than one backend without using dialect-specific types.

To build a SQL Server VARCHAR or NVARCHAR with MAX length, use None::

    my_table = Table(
        'my_table', metadata,
        Column('my_data', VARCHAR(None)),
        Column('my_n_data', NVARCHAR(None))
    )

Collation Support
-----------------

Character collations are supported by the base string types,
specified by the string argument "collation"::

    from sqlalchemy import VARCHAR
    Column('login', VARCHAR(32, collation='Latin1_General_CI_AS'))

When such a column is associated with a :class:`_schema.Table`, the
CREATE TABLE statement for this column will yield::

    login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL

LIMIT/OFFSET Support
--------------------

MSSQL has added support for LIMIT / OFFSET as of SQL Server 2012, via the
"OFFSET n ROWS" and "FETCH NEXT n ROWS" clauses.  SQLAlchemy supports these
syntaxes automatically if SQL Server 2012 or greater is detected.

.. versionchanged:: 1.4 support added for SQL Server "OFFSET n ROWS" and
   "FETCH NEXT n ROWS" syntax.

For statements that specify only LIMIT and no OFFSET, all versions of SQL
Server support the TOP keyword.  This syntax is used for all SQL Server
versions when no OFFSET clause is present.  A statement such as::

    select(some_table).limit(5)

will render similarly to::

    SELECT TOP 5 col1, col2.. FROM table

For versions of SQL Server prior to SQL Server 2012, a statement that uses
LIMIT and OFFSET, or just OFFSET alone, will be rendered using the
``ROW_NUMBER()`` window function.  A statement such as::

    select(some_table).order_by(some_table.c.col3).limit(5).offset(10)

will render similarly to::

    SELECT anon_1.col1, anon_1.col2 FROM (SELECT col1, col2,
    ROW_NUMBER() OVER (ORDER BY col3) AS
    mssql_rn FROM table WHERE t.x = :x_1) AS
    anon_1 WHERE mssql_rn > :param_1 AND mssql_rn <= :param_2 + :param_1

Note that when using LIMIT and/or OFFSET, whether using the older
or newer SQL Server syntaxes, the statement must have an ORDER BY as well,
else a :class:`.CompileError` is raised.

.. _mssql_isolation_level:

Transaction Isolation Level
---------------------------

All SQL Server dialects support setting of transaction isolation level
both via a dialect-specific parameter
:paramref:`_sa.create_engine.isolation_level`
accepted by :func:`_sa.create_engine`,
as well as the :paramref:`.Connection.execution_options.isolation_level`
argument as passed to
:meth:`_engine.Connection.execution_options`.
This feature works by issuing the
command ``SET TRANSACTION ISOLATION LEVEL <level>`` for
each new connection.

To set isolation level using :func:`_sa.create_engine`::

    engine = create_engine(
        "mssql+pyodbc://scott:tiger@ms_2008",
        isolation_level="REPEATABLE READ"
    )

To set using per-connection execution options::

    connection = engine.connect()
    connection = connection.execution_options(
        isolation_level="READ COMMITTED"
    )

Valid values for ``isolation_level`` include:

* ``AUTOCOMMIT`` - pyodbc / pymssql-specific
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``SNAPSHOT`` - specific to SQL Server

.. versionadded:: 1.2 added AUTOCOMMIT isolation level setting

.. seealso::

    :ref:`dbapi_autocommit`

Nullability
-----------

MSSQL has support for three levels of column nullability.  The default
nullability allows nulls and is explicit in the CREATE TABLE construct::

    name VARCHAR(20) NULL

If ``nullable=None`` is specified then no specification is made.  In
other words the database's configured default is used.  This will render::

    name VARCHAR(20)

If ``nullable`` is ``True`` or ``False`` then the column will be
``NULL`` or ``NOT NULL`` respectively.
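As a brief illustration (the table and column names below are hypothetical),
the three settings may be applied per column; the DDL noted in the comments
assumes the SQL Server dialect::

    from sqlalchemy import Column, MetaData, String, Table

    metadata = MetaData()

    t = Table(
        "nullability_demo",
        metadata,
        Column("a", String(20), nullable=True),   # a VARCHAR(20) NULL
        Column("b", String(20), nullable=False),  # b VARCHAR(20) NOT NULL
        Column("c", String(20), nullable=None),   # c VARCHAR(20)
    )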
Date / Time Handling
--------------------
DATE and TIME are supported.  Bind parameters are converted
to datetime.datetime() objects as required by most MSSQL drivers,
and results are processed from strings if needed.
The DATE and TIME types are not available for MSSQL 2005 and
previous - if a server version below 2008 is detected, DDL
for these types will be issued as DATETIME.

.. _mssql_large_type_deprecation:

Large Text/Binary Type Deprecation
----------------------------------

Per `SQL Server 2012/2014 Documentation `_, the ``NTEXT``,
``TEXT`` and ``IMAGE`` datatypes are to be removed from SQL
Server in a future release.  SQLAlchemy normally relates these types to the
:class:`.UnicodeText`, :class:`_expression.TextClause` and
:class:`.LargeBinary` datatypes.

In order to accommodate this change, a new flag ``deprecate_large_types``
is added to the dialect, which will be automatically set based on detection
of the server version in use, if not otherwise set by the user.  The
behavior of this flag is as follows:

* When this flag is ``True``, the :class:`.UnicodeText`,
  :class:`_expression.TextClause` and
  :class:`.LargeBinary` datatypes, when used to render DDL, will render the
  types ``NVARCHAR(max)``, ``VARCHAR(max)``, and ``VARBINARY(max)``,
  respectively.  This is a new behavior as of the addition of this flag.

* When this flag is ``False``, the :class:`.UnicodeText`,
  :class:`_expression.TextClause` and
  :class:`.LargeBinary` datatypes, when used to render DDL, will render the
  types ``NTEXT``, ``TEXT``, and ``IMAGE``,
  respectively.  This is the long-standing behavior of these types.

* The flag begins with the value ``None``, before a database connection is
  established.  If the dialect is used to render DDL without the flag being
  set, it is interpreted the same as ``False``.

* On first connection, the dialect detects if SQL Server version 2012 or
  greater is in use; if the flag is still at ``None``, it sets it to ``True``
  or ``False`` based on whether 2012 or greater is detected.

* The flag can be set to either ``True`` or ``False`` when the dialect
  is created, typically via :func:`_sa.create_engine`::

    eng = create_engine("mssql+pymssql://user:pass@host/db",
                        deprecate_large_types=True)

* Complete control over whether the "old" or "new" types are rendered is
  available in all SQLAlchemy versions by using the UPPERCASE type objects
  instead: :class:`_types.NVARCHAR`, :class:`_types.VARCHAR`,
  :class:`_types.VARBINARY`, :class:`_types.TEXT`, :class:`_mssql.NTEXT`,
  :class:`_mssql.IMAGE` will always remain fixed and always output exactly
  that type.

.. versionadded:: 1.0.0
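As an example of using the UPPERCASE types directly, so that the rendered DDL
does not depend on the ``deprecate_large_types`` flag, the types may be
imported and applied per column; this is a minimal sketch with illustrative
table and column names::

    from sqlalchemy import NVARCHAR, VARBINARY, Column, MetaData, Table
    from sqlalchemy.dialects.mssql import IMAGE, NTEXT

    metadata = MetaData()

    docs = Table(
        "docs",
        metadata,
        Column("body", NVARCHAR(None)),       # always NVARCHAR(max)
        Column("legacy_body", NTEXT),         # always NTEXT
        Column("payload", VARBINARY(None)),   # always VARBINARY(max)
        Column("legacy_payload", IMAGE),      # always IMAGE
    )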
.. _multipart_schema_names:

Multipart Schema Names
----------------------

SQL Server schemas sometimes require multiple parts to their "schema"
qualifier, that is, including the database name and owner name as separate
tokens, such as ``mydatabase.dbo.some_table``.  These multipart names can be
set at once using the :paramref:`_schema.Table.schema` argument of
:class:`_schema.Table`::

    Table(
        "some_table", metadata,
        Column("q", String(50)),
        schema="mydatabase.dbo"
    )

When performing operations such as table or component reflection, a schema
argument that contains a dot will be split into separate
"database" and "owner" components in order to correctly query the SQL
Server information schema tables, as these two values are stored separately.
Additionally, when rendering the schema name for DDL or SQL, the two
components will be quoted separately for case sensitive names and other
special characters.  Given an argument as below::

    Table(
        "some_table", metadata,
        Column("q", String(50)),
        schema="MyDataBase.dbo"
    )

The above schema would be rendered as ``[MyDataBase].dbo``, and also in
reflection, would be reflected using "dbo" as the owner and "MyDataBase"
as the database name.

To control how the schema name is broken into database / owner, specify
brackets (which in SQL Server are quoting characters) in the name.  Below,
the "owner" will be considered as ``MyDataBase.dbo`` and the "database"
will be None::

    Table(
        "some_table", metadata,
        Column("q", String(50)),
        schema="[MyDataBase.dbo]"
    )

To individually specify both database and owner name with special characters
or embedded dots, use two sets of brackets::

    Table(
        "some_table", metadata,
        Column("q", String(50)),
        schema="[MyDataBase.Period].[MyOwner.Dot]"
    )

.. versionchanged:: 1.2 the SQL Server dialect now treats brackets as
   identifier delimiters splitting the schema into separate database
   and owner tokens, to allow dots within either name itself.

.. _legacy_schema_rendering:

Legacy Schema Mode
------------------

Very old versions of the MSSQL dialect introduced the behavior such that a
schema-qualified table would be auto-aliased when used in a
SELECT statement; given a table::

    account_table = Table(
        'account', metadata,
        Column('id', Integer, primary_key=True),
        Column('info', String(100)),
        schema="customer_schema"
    )

this legacy mode of rendering would assume that "customer_schema.account"
would not be accepted by all parts of the SQL statement, as illustrated
below::

    >>> eng = create_engine("mssql+pymssql://mydsn", legacy_schema_aliasing=True)
    >>> print(account_table.select().compile(eng))
    SELECT account_1.id, account_1.info
    FROM customer_schema.account AS account_1

This mode of behavior is now off by default, as it appears to have served
no purpose; however in the case that legacy applications rely upon it,
it is available using the ``legacy_schema_aliasing`` argument to
:func:`_sa.create_engine` as illustrated above.

.. versionchanged:: 1.1 the ``legacy_schema_aliasing`` flag introduced
   in version 1.0.5 to allow disabling of legacy mode for schemas now
   defaults to False.

.. deprecated:: 1.4

   The ``legacy_schema_aliasing`` flag is now
   deprecated and will be removed in a future release.

.. _mssql_indexes:

Clustered Index Support
-----------------------

The MSSQL dialect supports clustered indexes (and primary keys) via the
``mssql_clustered`` option.  This option is available to :class:`.Index`,
:class:`.UniqueConstraint`, and :class:`.PrimaryKeyConstraint`.

To generate a clustered index::

    Index("my_index", table.c.x, mssql_clustered=True)

which renders the index as ``CREATE CLUSTERED INDEX my_index ON table (x)``.
To generate a clustered primary key use::

    Table('my_table', metadata,
          Column('x', ...),
          Column('y', ...),
          PrimaryKeyConstraint("x", "y", mssql_clustered=True))

which will render the table, for example, as::

    CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
                           PRIMARY KEY CLUSTERED (x, y))

Similarly, we can generate a clustered unique constraint using::

    Table('my_table', metadata,
          Column('x', ...),
          Column('y', ...),
          PrimaryKeyConstraint("x"),
          UniqueConstraint("y", mssql_clustered=True),
          )

To explicitly request a non-clustered primary key (for example, when
a separate clustered index is desired), use::

    Table('my_table', metadata,
          Column('x', ...),
          Column('y', ...),
          PrimaryKeyConstraint("x", "y", mssql_clustered=False))

which will render the table, for example, as::

    CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
                           PRIMARY KEY NONCLUSTERED (x, y))

.. versionchanged:: 1.1 the ``mssql_clustered`` option now defaults
   to None, rather than False.  ``mssql_clustered=False`` now explicitly
   renders the NONCLUSTERED clause, whereas None omits the CLUSTERED
   clause entirely, allowing SQL Server defaults to take effect.

MSSQL-Specific Index Options
-----------------------------

In addition to clustering, the MSSQL dialect supports other special options
for :class:`.Index`.

INCLUDE
^^^^^^^

The ``mssql_include`` option renders INCLUDE(colname) for the given string
names::

    Index("my_index", table.c.x, mssql_include=['y'])

would render the index as ``CREATE INDEX my_index ON table (x) INCLUDE (y)``.

.. _mssql_index_where:

Filtered Indexes
^^^^^^^^^^^^^^^^

The ``mssql_where`` option renders WHERE(condition) for the given string
names::

    Index("my_index", table.c.x, mssql_where=table.c.x > 10)

would render the index as ``CREATE INDEX my_index ON table (x) WHERE x > 10``.

.. versionadded:: 1.3.4

Index ordering
^^^^^^^^^^^^^^

Index ordering is available via functional expressions, such as::

    Index("my_index", table.c.x.desc())

would render the index as ``CREATE INDEX my_index ON table (x DESC)``.

.. seealso::

    :ref:`schema_indexes_functional`

Compatibility Levels
--------------------
MSSQL supports the notion of setting compatibility levels at the
database level.  This allows, for instance, running a database that
is compatible with SQL2000 while running on a SQL2005 database
server.  ``server_version_info`` will always return the database
server version information (in this case SQL2005) and not the
compatibility level information.  Because of this, if running under
a backwards compatibility mode SQLAlchemy may attempt to use T-SQL
statements that are unable to be parsed by the database server.

Triggers
--------

SQLAlchemy by default uses OUTPUT INSERTED to get at newly
generated primary key values via IDENTITY columns or other
server side defaults.  MS-SQL does not
allow the usage of OUTPUT INSERTED on tables that have triggers.
To disable the usage of OUTPUT INSERTED on a per-table basis,
specify ``implicit_returning=False`` for each :class:`_schema.Table`
which has triggers::

    Table('mytable', metadata,
        Column('id', Integer, primary_key=True),
        # ...,
        implicit_returning=False
    )

Declarative form::

    class MyClass(Base):
        # ...
        __table_args__ = {'implicit_returning':False}

This option can also be specified engine-wide using the
``implicit_returning=False`` argument on :func:`_sa.create_engine`.
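For example (the connection URL below is illustrative only)::

    from sqlalchemy import create_engine

    # no table on this engine will use OUTPUT INSERTED; newly generated
    # IDENTITY values are fetched via scope_identity() / @@identity instead,
    # as described in the INSERT behavior section above
    engine = create_engine(
        "mssql+pyodbc://scott:tiger@mydsn",
        implicit_returning=False,
    )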
.. _mssql_rowcount_versioning:

Rowcount Support / ORM Versioning
---------------------------------

The SQL Server drivers may have limited ability to return the number
of rows updated from an UPDATE or DELETE statement.

As of this writing, the PyODBC driver is not able to return a rowcount when
OUTPUT INSERTED is used.  This impacts the SQLAlchemy ORM's versioning feature
in many cases where server-side value generators are in use in that while the
versioning operations can succeed, the ORM cannot always check that an UPDATE
or DELETE statement matched the number of rows expected, which is how it
verifies that the version identifier matched.  When this condition occurs, a
warning will be emitted but the operation will proceed.

The use of OUTPUT INSERTED can be disabled by setting the
:paramref:`_schema.Table.implicit_returning` flag to ``False`` on a particular
:class:`_schema.Table`, which in declarative looks like::

    class MyTable(Base):
        __tablename__ = 'mytable'
        id = Column(Integer, primary_key=True)
        stuff = Column(String(10))
        timestamp = Column(TIMESTAMP(), default=text('DEFAULT'))
        __mapper_args__ = {
            'version_id_col': timestamp,
            'version_id_generator': False,
        }
        __table_args__ = {
            'implicit_returning': False
        }

Enabling Snapshot Isolation
---------------------------

SQL Server has a default transaction
isolation mode that locks entire tables, and causes even mildly concurrent
applications to have long held locks and frequent deadlocks.
Enabling snapshot isolation for the database as a whole is recommended
for modern levels of concurrency support.  This is accomplished via the
following ALTER DATABASE commands executed at the SQL prompt::

    ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON

    ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON

Background on SQL Server snapshot isolation is available at
https://msdn.microsoft.com/en-us/library/ms175095.aspx.
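The same commands may also be issued from SQLAlchemy.  The sketch below is one
possible approach and not part of the dialect itself; it assumes a login with
ALTER DATABASE permission, uses ``MyDatabase`` and the connection URL as
placeholders, and runs on an AUTOCOMMIT connection since these statements
should not execute inside a transaction::

    from sqlalchemy import create_engine

    engine = create_engine("mssql+pyodbc://scott:tiger@mydsn")

    # ALTER DATABASE must run outside of a transaction block
    with engine.connect().execution_options(
        isolation_level="AUTOCOMMIT"
    ) as conn:
        conn.exec_driver_sql(
            "ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON"
        )
        conn.exec_driver_sql(
            "ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON"
        )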
[Remainder of the compiled module: imports, SQL Server version constants, RESERVED_WORDS, the SQL Server type classes (REAL, TINYINT, TIME, DATETIME2, DATETIMEOFFSET, SMALLDATETIME, TIMESTAMP, ROWVERSION, NTEXT, IMAGE, VARBINARY, XML, BIT, MONEY, SMALLMONEY, UNIQUEIDENTIFIER, SQL_VARIANT), the TryCast construct, the ischema_names reflection map, and the MSTypeCompiler, MSExecutionContext, MSSQLCompiler, MSSQLStrictCompiler, MSDDLCompiler, MSIdentifierPreparer and MSDialect classes.]