Fokko commented on code in PR #7921:
URL: https://github.com/apache/iceberg/pull/7921#discussion_r1260910673


##########
python/pyiceberg/catalog/sql.py:
##########
@@ -0,0 +1,441 @@
+from typing import (
+    List,
+    Optional,
+    Set,
+    Union,
+)
+
+from sqlalchemy import (
+    String,
+    case,
+    create_engine,
+    delete,
+    select,
+    union,
+    update,
+)
+from sqlalchemy.exc import IntegrityError
+from sqlalchemy.orm import (
+    DeclarativeBase,
+    Mapped,
+    MappedAsDataclass,
+    Session,
+    mapped_column,
+)
+
+from pyiceberg.catalog import (
+    METADATA_LOCATION,
+    Catalog,
+    Identifier,
+    Properties,
+    PropertiesUpdateSummary,
+)
+from pyiceberg.exceptions import (
+    NamespaceAlreadyExistsError,
+    NamespaceNotEmptyError,
+    NoSuchNamespaceError,
+    NoSuchPropertyException,
+    NoSuchTableError,
+    TableAlreadyExistsError,
+)
+from pyiceberg.io import load_file_io
+from pyiceberg.partitioning import UNPARTITIONED_PARTITION_SPEC, PartitionSpec
+from pyiceberg.schema import Schema
+from pyiceberg.serializers import FromInputFile
+from pyiceberg.table import Table
+from pyiceberg.table.metadata import new_table_metadata
+from pyiceberg.table.sorting import UNSORTED_SORT_ORDER, SortOrder
+from pyiceberg.typedef import EMPTY_DICT
+
+
+class SQLCatalogBase(MappedAsDataclass, DeclarativeBase):

Review Comment:
   I find this name a bit confusing since it suggests that it is the base of the SqlCatalog.
   ```suggestion
   class SQLCatalogBaseTable(MappedAsDataclass, DeclarativeBase):
   ```



##########
python/pyiceberg/catalog/sql.py:
##########
@@ -0,0 +1,441 @@
+from typing import (
+    List,
+    Optional,
+    Set,
+    Union,
+)
+
+from sqlalchemy import (
+    String,
+    case,
+    create_engine,
+    delete,
+    select,
+    union,
+    update,
+)
+from sqlalchemy.exc import IntegrityError
+from sqlalchemy.orm import (
+    DeclarativeBase,
+    Mapped,
+    MappedAsDataclass,
+    Session,
+    mapped_column,
+)
+
+from pyiceberg.catalog import (
+    METADATA_LOCATION,
+    Catalog,
+    Identifier,
+    Properties,
+    PropertiesUpdateSummary,
+)
+from pyiceberg.exceptions import (
+    NamespaceAlreadyExistsError,
+    NamespaceNotEmptyError,
+    NoSuchNamespaceError,
+    NoSuchPropertyException,
+    NoSuchTableError,
+    TableAlreadyExistsError,
+)
+from pyiceberg.io import load_file_io
+from pyiceberg.partitioning import UNPARTITIONED_PARTITION_SPEC, PartitionSpec
+from pyiceberg.schema import Schema
+from pyiceberg.serializers import FromInputFile
+from pyiceberg.table import Table
+from pyiceberg.table.metadata import new_table_metadata
+from pyiceberg.table.sorting import UNSORTED_SORT_ORDER, SortOrder
+from pyiceberg.typedef import EMPTY_DICT
+
+
+class SQLCatalogBase(MappedAsDataclass, DeclarativeBase):
+    pass
+
+
+class IcebergTables(SQLCatalogBase):
+    __tablename__ = "iceberg_tables"
+
+    catalog_name: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True)
+    table_namespace: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True)
+    table_name: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True)
+    metadata_location: Mapped[str] = mapped_column(String(1000), nullable=True)
+    previous_metadata_location: Mapped[str] = mapped_column(String(1000), nullable=True)
+
+
+class IcebergNamespaceProperties(SQLCatalogBase):
+    __tablename__ = "iceberg_namespace_properties"
+    # Catalog minimum Namespace Properties
+    NAMESPACE_MINIMAL_PROPERTIES = {"exists": "true"}
+
+    catalog_name: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True)
+    namespace: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True)
+    property_key: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True)
+    property_value: Mapped[str] = mapped_column(String(1000), nullable=True)
+
+
+class SQLCatalog(Catalog):
+    def __init__(self, name: str, **properties: str):
+        super().__init__(name, **properties)
+
+        if not (uri_prop := self.properties.get("uri")):
+            raise NoSuchPropertyException("SQL connection URI is required")
+        self.engine = create_engine(uri_prop, echo=True)
+
+    def _convert_orm_to_iceberg(self, orm_table: IcebergTables) -> Table:
+        # Check for expected properties.
+        if not (metadata_location := orm_table.metadata_location):
+            raise NoSuchTableError(f"Table property {METADATA_LOCATION} is missing")
+        if not (table_namespace := orm_table.table_namespace):
+            raise NoSuchTableError(f"Table property {IcebergTables.table_namespace} is missing")
+        if not (table_name := orm_table.table_name):
+            raise NoSuchTableError(f"Table property {IcebergTables.table_name} is missing")
+
+        io = load_file_io(properties=self.properties, location=metadata_location)
+        file = io.new_input(metadata_location)
+        metadata = FromInputFile.table_metadata(file)
+        return Table(
+            identifier=(self.name, table_namespace, table_name),
+            metadata=metadata,
+            metadata_location=metadata_location,
+            io=self._load_file_io(metadata.properties, metadata_location),
+        )
+
+    def create_table(
+        self,
+        identifier: Union[str, Identifier],
+        schema: Schema,
+        location: Optional[str] = None,
+        partition_spec: PartitionSpec = UNPARTITIONED_PARTITION_SPEC,
+        sort_order: SortOrder = UNSORTED_SORT_ORDER,
+        properties: Properties = EMPTY_DICT,
+    ) -> Table:
+        """
+        Create an Iceberg table.
+
+        Args:
+            identifier: Table identifier.
+            schema: Table's schema.
+            location: Location for the table. Optional Argument.
+            partition_spec: PartitionSpec for the table.
+            sort_order: SortOrder for the table.
+            properties: Table properties that can be a string based dictionary.
+
+        Returns:
+            Table: the created table instance.
+
+        Raises:
+            AlreadyExistsError: If a table with the name already exists.
+            ValueError: If the identifier is invalid, or no path is given to store metadata.
+
+        """
+        database_name, table_name = self.identifier_to_database_and_table(identifier)
+        if not self._namespace_exists(database_name):
+            raise NoSuchNamespaceError(f"Namespace does not exist: {database_name}")
+
+        location = self._resolve_table_location(location, database_name, table_name)
+        metadata_location = self._get_metadata_location(location=location)
+        metadata = new_table_metadata(
+            location=location, schema=schema, partition_spec=partition_spec, sort_order=sort_order, properties=properties
+        )
+        io = load_file_io(properties=self.properties, location=metadata_location)
+        self._write_metadata(metadata, io, metadata_location)
+
+        with Session(self.engine) as session:
+            try:
+                session.add(
+                    IcebergTables(
+                        catalog_name=self.name,
+                        table_namespace=database_name,
+                        table_name=table_name,
+                        metadata_location=metadata_location,
+                        previous_metadata_location=None,
+                    )
+                )
+                session.commit()
+            except IntegrityError as e:
+                raise TableAlreadyExistsError(f"Table {database_name}.{table_name} already exists") from e
+
+        return self.load_table(identifier=identifier)
+
+    def load_table(self, identifier: Union[str, Identifier]) -> Table:
+        """Loads the table's metadata and returns the table instance.
+
+        You can also use this method to check for table existence using 'try catalog.table() except NoSuchTableError'.
+        Note: This method doesn't scan data stored in the table.
+
+        Args:
+            identifier (str | Identifier): Table identifier.
+
+        Returns:
+            Table: the table instance with its metadata.
+
+        Raises:
+            NoSuchTableError: If a table with the name does not exist.
+        """
+        database_name, table_name = self.identifier_to_database_and_table(identifier, NoSuchTableError)
+        with Session(self.engine) as session:
+            stmt = select(IcebergTables).where(
+                IcebergTables.catalog_name == self.name,
+                IcebergTables.table_namespace == database_name,
+                IcebergTables.table_name == table_name,
+            )
+            result = session.scalar(stmt)
+            if result:
+                return self._convert_orm_to_iceberg(result)

Review Comment:
   We don't need the session here, right? It's often best to close the session as soon as you no longer need it.
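
   For illustration, a minimal sketch of one way to act on this comment, assuming the surrounding `load_table` variables (`database_name`, `table_name`) and the `_convert_orm_to_iceberg` helper from this PR:
   ```python
   with Session(self.engine) as session:
       stmt = select(IcebergTables).where(
           IcebergTables.catalog_name == self.name,
           IcebergTables.table_namespace == database_name,
           IcebergTables.table_name == table_name,
       )
       # scalar() loads the row's column values eagerly, so the detached
       # instance can still be read after the session is closed.
       result = session.scalar(stmt)
   if result:
       return self._convert_orm_to_iceberg(result)
   raise NoSuchTableError(f"Table does not exist: {database_name}.{table_name}")
   ```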



##########
python/pyiceberg/catalog/sql.py:
##########
@@ -0,0 +1,441 @@
+from typing import (
+    List,
+    Optional,
+    Set,
+    Union,
+)
+
+from sqlalchemy import (
+    String,
+    case,
+    create_engine,
+    delete,
+    select,
+    union,
+    update,
+)
+from sqlalchemy.exc import IntegrityError
+from sqlalchemy.orm import (
+    DeclarativeBase,
+    Mapped,
+    MappedAsDataclass,
+    Session,
+    mapped_column,
+)
+
+from pyiceberg.catalog import (
+    METADATA_LOCATION,
+    Catalog,
+    Identifier,
+    Properties,
+    PropertiesUpdateSummary,
+)
+from pyiceberg.exceptions import (
+    NamespaceAlreadyExistsError,
+    NamespaceNotEmptyError,
+    NoSuchNamespaceError,
+    NoSuchPropertyException,
+    NoSuchTableError,
+    TableAlreadyExistsError,
+)
+from pyiceberg.io import load_file_io
+from pyiceberg.partitioning import UNPARTITIONED_PARTITION_SPEC, PartitionSpec
+from pyiceberg.schema import Schema
+from pyiceberg.serializers import FromInputFile
+from pyiceberg.table import Table
+from pyiceberg.table.metadata import new_table_metadata
+from pyiceberg.table.sorting import UNSORTED_SORT_ORDER, SortOrder
+from pyiceberg.typedef import EMPTY_DICT
+
+
+class SQLCatalogBase(MappedAsDataclass, DeclarativeBase):
+    pass
+
+
+class IcebergTables(SQLCatalogBase):
+    __tablename__ = "iceberg_tables"
+
+    catalog_name: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True)
+    table_namespace: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True)
+    table_name: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True)
+    metadata_location: Mapped[str] = mapped_column(String(1000), nullable=True)
+    previous_metadata_location: Mapped[str] = mapped_column(String(1000), nullable=True)
+
+
+class IcebergNamespaceProperties(SQLCatalogBase):
+    __tablename__ = "iceberg_namespace_properties"
+    # Catalog minimum Namespace Properties
+    NAMESPACE_MINIMAL_PROPERTIES = {"exists": "true"}
+
+    catalog_name: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True)
+    namespace: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True)
+    property_key: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True)
+    property_value: Mapped[str] = mapped_column(String(1000), nullable=True)

Review Comment:
   Feels odd to me to have a nullable property value.
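
   One possible shape, purely as a sketch (reusing the `SQLCatalogBaseTable` name suggested above and the SQLAlchemy 2.x declarative mapping used in this PR), would be to make the value column non-nullable, so that an absent property is simply not stored as a row:
   ```python
   from sqlalchemy import String
   from sqlalchemy.orm import DeclarativeBase, Mapped, MappedAsDataclass, mapped_column


   class SQLCatalogBaseTable(MappedAsDataclass, DeclarativeBase):
       pass


   class IcebergNamespaceProperties(SQLCatalogBaseTable):
       __tablename__ = "iceberg_namespace_properties"

       catalog_name: Mapped[str] = mapped_column(String(255), primary_key=True)
       namespace: Mapped[str] = mapped_column(String(255), primary_key=True)
       property_key: Mapped[str] = mapped_column(String(255), primary_key=True)
       # A property either has a non-null value or no row at all.
       property_value: Mapped[str] = mapped_column(String(1000), nullable=False)
   ```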



##########
python/pyiceberg/catalog/sql.py:
##########
@@ -0,0 +1,441 @@
+from typing import (
+    List,
+    Optional,
+    Set,
+    Union,
+)
+
+from sqlalchemy import (
+    String,
+    case,
+    create_engine,
+    delete,
+    select,
+    union,
+    update,
+)
+from sqlalchemy.exc import IntegrityError
+from sqlalchemy.orm import (
+    DeclarativeBase,
+    Mapped,
+    MappedAsDataclass,
+    Session,
+    mapped_column,
+)
+
+from pyiceberg.catalog import (
+    METADATA_LOCATION,
+    Catalog,
+    Identifier,
+    Properties,
+    PropertiesUpdateSummary,
+)
+from pyiceberg.exceptions import (
+    NamespaceAlreadyExistsError,
+    NamespaceNotEmptyError,
+    NoSuchNamespaceError,
+    NoSuchPropertyException,
+    NoSuchTableError,
+    TableAlreadyExistsError,
+)
+from pyiceberg.io import load_file_io
+from pyiceberg.partitioning import UNPARTITIONED_PARTITION_SPEC, PartitionSpec
+from pyiceberg.schema import Schema
+from pyiceberg.serializers import FromInputFile
+from pyiceberg.table import Table
+from pyiceberg.table.metadata import new_table_metadata
+from pyiceberg.table.sorting import UNSORTED_SORT_ORDER, SortOrder
+from pyiceberg.typedef import EMPTY_DICT
+
+
+class SQLCatalogBase(MappedAsDataclass, DeclarativeBase):
+    pass
+
+
+class IcebergTables(SQLCatalogBase):
+    __tablename__ = "iceberg_tables"
+
+    catalog_name: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True)
+    table_namespace: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True)
+    table_name: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True)
+    metadata_location: Mapped[str] = mapped_column(String(1000), nullable=True)
+    previous_metadata_location: Mapped[str] = mapped_column(String(1000), nullable=True)
+
+
+class IcebergNamespaceProperties(SQLCatalogBase):
+    __tablename__ = "iceberg_namespace_properties"
+    # Catalog minimum Namespace Properties
+    NAMESPACE_MINIMAL_PROPERTIES = {"exists": "true"}
+
+    catalog_name: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True)
+    namespace: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True)
+    property_key: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True)
+    property_value: Mapped[str] = mapped_column(String(1000), nullable=True)
+
+
+class SQLCatalog(Catalog):
+    def __init__(self, name: str, **properties: str):
+        super().__init__(name, **properties)
+
+        if not (uri_prop := self.properties.get("uri")):
+            raise NoSuchPropertyException("SQL connection URI is required")
+        self.engine = create_engine(uri_prop, echo=True)
+
+    def _convert_orm_to_iceberg(self, orm_table: IcebergTables) -> Table:
+        # Check for expected properties.
+        if not (metadata_location := orm_table.metadata_location):
+            raise NoSuchTableError(f"Table property {METADATA_LOCATION} is missing")
+        if not (table_namespace := orm_table.table_namespace):
+            raise NoSuchTableError(f"Table property {IcebergTables.table_namespace} is missing")
+        if not (table_name := orm_table.table_name):
+            raise NoSuchTableError(f"Table property {IcebergTables.table_name} is missing")
+
+        io = load_file_io(properties=self.properties, location=metadata_location)
+        file = io.new_input(metadata_location)
+        metadata = FromInputFile.table_metadata(file)
+        return Table(
+            identifier=(self.name, table_namespace, table_name),
+            metadata=metadata,
+            metadata_location=metadata_location,
+            io=self._load_file_io(metadata.properties, metadata_location),
+        )
+
+    def create_table(
+        self,
+        identifier: Union[str, Identifier],
+        schema: Schema,
+        location: Optional[str] = None,
+        partition_spec: PartitionSpec = UNPARTITIONED_PARTITION_SPEC,
+        sort_order: SortOrder = UNSORTED_SORT_ORDER,
+        properties: Properties = EMPTY_DICT,
+    ) -> Table:
+        """
+        Create an Iceberg table.
+
+        Args:
+            identifier: Table identifier.
+            schema: Table's schema.
+            location: Location for the table. Optional Argument.
+            partition_spec: PartitionSpec for the table.
+            sort_order: SortOrder for the table.
+            properties: Table properties that can be a string based dictionary.
+
+        Returns:
+            Table: the created table instance.
+
+        Raises:
+            AlreadyExistsError: If a table with the name already exists.
+            ValueError: If the identifier is invalid, or no path is given to store metadata.
+
+        """
+        database_name, table_name = self.identifier_to_database_and_table(identifier)
+        if not self._namespace_exists(database_name):
+            raise NoSuchNamespaceError(f"Namespace does not exist: {database_name}")
+
+        location = self._resolve_table_location(location, database_name, table_name)
+        metadata_location = self._get_metadata_location(location=location)
+        metadata = new_table_metadata(
+            location=location, schema=schema, partition_spec=partition_spec, sort_order=sort_order, properties=properties
+        )
+        io = load_file_io(properties=self.properties, location=metadata_location)
+        self._write_metadata(metadata, io, metadata_location)
+
+        with Session(self.engine) as session:
+            try:
+                session.add(
+                    IcebergTables(
+                        catalog_name=self.name,
+                        table_namespace=database_name,
+                        table_name=table_name,
+                        metadata_location=metadata_location,
+                        previous_metadata_location=None,
+                    )
+                )
+                session.commit()
+            except IntegrityError as e:
+                raise TableAlreadyExistsError(f"Table {database_name}.{table_name} already exists") from e
+
+        return self.load_table(identifier=identifier)
+
+    def load_table(self, identifier: Union[str, Identifier]) -> Table:
+        """Loads the table's metadata and returns the table instance.
+
+        You can also use this method to check for table existence using 'try catalog.table() except NoSuchTableError'.
+        Note: This method doesn't scan data stored in the table.
+
+        Args:
+            identifier (str | Identifier): Table identifier.
+
+        Returns:
+            Table: the table instance with its metadata.
+
+        Raises:
+            NoSuchTableError: If a table with the name does not exist.
+        """
+        database_name, table_name = self.identifier_to_database_and_table(identifier, NoSuchTableError)
+        with Session(self.engine) as session:
+            stmt = select(IcebergTables).where(
+                IcebergTables.catalog_name == self.name,
+                IcebergTables.table_namespace == database_name,
+                IcebergTables.table_name == table_name,
+            )
+            result = session.scalar(stmt)
+            if result:
+                return self._convert_orm_to_iceberg(result)
+            raise NoSuchTableError(f"Table does not exist: {database_name}.{table_name}")
+
+    def drop_table(self, identifier: Union[str, Identifier]) -> None:
+        """Drop a table.
+
+        Args:
+            identifier (str | Identifier): Table identifier.
+
+        Raises:
+            NoSuchTableError: If a table with the name does not exist.
+        """
+        database_name, table_name = self.identifier_to_database_and_table(identifier, NoSuchTableError)
+        with Session(self.engine) as session:
+            deleted_rows = (
+                session.query(IcebergTables)
+                .where(
+                    IcebergTables.catalog_name == self.name,
+                    IcebergTables.table_namespace == database_name,
+                    IcebergTables.table_name == table_name,
+                )
+                .delete()
+            )
+            if deleted_rows < 1:
+                raise NoSuchTableError(f"Table does not exist: {database_name}.{table_name}")
+            session.commit()

Review Comment:
   If you prefer to have the `delete(IcebergTables)` pattern here as well, the following works for me:
   ```suggestion
           database_name, table_name = self.identifier_to_database_and_table(identifier, NoSuchTableError)
           with Session(self.engine) as session:
               res = session.execute(
                   delete(IcebergTables)
                   .where(
                       IcebergTables.catalog_name == self.name,
                       IcebergTables.table_namespace == database_name,
                       IcebergTables.table_name == table_name,
                   )
               )
               session.commit()
           if res.rowcount < 1:
               raise NoSuchTableError(f"Table does not exist: {database_name}.{table_name}")
   ```



##########
python/tests/catalog/test_sql_integration.py:
##########
@@ -0,0 +1,304 @@
+import os
+from typing import Generator, List
+
+import pytest
+
+from pyiceberg.catalog import Catalog
+from pyiceberg.catalog.sql import SQLCatalog, SQLCatalogBase
+from pyiceberg.exceptions import (
+    NamespaceAlreadyExistsError,
+    NamespaceNotEmptyError,
+    NoSuchNamespaceError,
+    NoSuchTableError,
+    TableAlreadyExistsError,
+)
+from pyiceberg.schema import Schema
+from tests.conftest import clean_up, get_bucket_name, get_s3_path
+
+# The number of tables/databases used in list_table/namespace test
+LIST_TEST_NUMBER = 2
+
+
+@pytest.fixture(name="test_catalog", scope="module")
+def fixture_test_catalog() -> Generator[SQLCatalog, None, None]:
+    """The pre- and post-setting of SQL integration test."""
+    os.environ["AWS_TEST_BUCKET"] = "warehouse"
+    os.environ["AWS_REGION"] = "us-east-1"
+    os.environ["AWS_ACCESS_KEY_ID"] = "admin"
+    os.environ["AWS_SECRET_ACCESS_KEY"] = "password"
+
+    props = {
+        "uri": "sqlite+pysqlite:///:memory:",
+        "warehouse": get_s3_path(get_bucket_name()),
+        "s3.endpoint": "http://localhost:9000",
+        "s3.access-key-id": "admin",
+        "s3.secret-access-key": "password",
+    }
+    test_catalog = SQLCatalog("test_sql_catalog", **props)
+    SQLCatalogBase.metadata.create_all(test_catalog.engine)

Review Comment:
   This is cool!



##########
python/pyiceberg/catalog/sql.py:
##########
@@ -0,0 +1,441 @@
+from typing import (
+    List,
+    Optional,
+    Set,
+    Union,
+)
+
+from sqlalchemy import (
+    String,
+    case,
+    create_engine,
+    delete,
+    select,
+    union,
+    update,
+)
+from sqlalchemy.exc import IntegrityError
+from sqlalchemy.orm import (
+    DeclarativeBase,
+    Mapped,
+    MappedAsDataclass,
+    Session,
+    mapped_column,
+)
+
+from pyiceberg.catalog import (
+    METADATA_LOCATION,
+    Catalog,
+    Identifier,
+    Properties,
+    PropertiesUpdateSummary,
+)
+from pyiceberg.exceptions import (
+    NamespaceAlreadyExistsError,
+    NamespaceNotEmptyError,
+    NoSuchNamespaceError,
+    NoSuchPropertyException,
+    NoSuchTableError,
+    TableAlreadyExistsError,
+)
+from pyiceberg.io import load_file_io
+from pyiceberg.partitioning import UNPARTITIONED_PARTITION_SPEC, PartitionSpec
+from pyiceberg.schema import Schema
+from pyiceberg.serializers import FromInputFile
+from pyiceberg.table import Table
+from pyiceberg.table.metadata import new_table_metadata
+from pyiceberg.table.sorting import UNSORTED_SORT_ORDER, SortOrder
+from pyiceberg.typedef import EMPTY_DICT
+
+
+class SQLCatalogBase(MappedAsDataclass, DeclarativeBase):
+    pass
+
+
+class IcebergTables(SQLCatalogBase):
+    __tablename__ = "iceberg_tables"
+
+    catalog_name: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True)
+    table_namespace: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True)
+    table_name: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True)
+    metadata_location: Mapped[str] = mapped_column(String(1000), nullable=True)
+    previous_metadata_location: Mapped[str] = mapped_column(String(1000), nullable=True)
+
+
+class IcebergNamespaceProperties(SQLCatalogBase):
+    __tablename__ = "iceberg_namespace_properties"
+    # Catalog minimum Namespace Properties
+    NAMESPACE_MINIMAL_PROPERTIES = {"exists": "true"}
+
+    catalog_name: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True)
+    namespace: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True)
+    property_key: Mapped[str] = mapped_column(String(255), nullable=False, primary_key=True)
+    property_value: Mapped[str] = mapped_column(String(1000), nullable=True)
+
+
+class SQLCatalog(Catalog):

Review Comment:
   I think this is more in line with the `RestCatalog`.
   ```suggestion
   class SqlCatalog(Catalog):
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

