Delete an empty table partition filegroup.

create partition function pfOrders(int)
as range left
for values(34);
create partition scheme psOrders
as partition pfOrders
to (FG_34,FG_65)
go
I used the above code to create the partition function and scheme. I switched the FG_65 filegroup's data out to another table. Now I have to delete the filegroup FG_65 completely. I can delete the file successfully, as it does not have any data, but because the partition scheme is still using the FG_65 filegroup, I was unable to drop it.
I am not quite sure how I can use split or merge on this. If I merge 34, then the first partition can be deleted, but I want to delete the last partition.
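
To see why, here is a minimal sketch against the pfOrders/psOrders definitions above (the catalog query is generic; per the ALTER PARTITION FUNCTION documentation, MERGE removes from the scheme the filegroup that originally held the merged boundary value):

-- With RANGE LEFT, boundary value 34 belongs to the partition on FG_34,
-- so merging it removes FG_34 from the scheme (moving any remaining rows
-- to FG_65) - the opposite of what is wanted here:
-- ALTER PARTITION FUNCTION pfOrders() MERGE RANGE (34);
-- Check which filegroup each partition maps to before merging:
SELECT dds.destination_id AS partition_number, fg.name AS filegroup_name
FROM sys.partition_schemes ps
JOIN sys.destination_data_spaces dds
    ON dds.partition_scheme_id = ps.data_space_id
JOIN sys.filegroups fg
    ON fg.data_space_id = dds.data_space_id
WHERE ps.name = 'psOrders';

Note that under RANGE RIGHT the merged boundary belongs to the partition on its right, so that partition's filegroup is the one removed from the scheme; the demo script below shows the partition-to-filegroup mapping before and after such a merge.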

USE [master]
GO
DROP DATABASE PartitionMaintTest
GO
CREATE DATABASE PartitionMaintTest
GO
ALTER DATABASE PartitionMaintTest SET RECOVERY SIMPLE
GO
ALTER DATABASE [PartitionMaintTest] ADD FILEGROUP [FACT_EMPTY]
GO
ALTER DATABASE [PartitionMaintTest] ADD FILEGROUP [FACT_2008_M10]
ALTER DATABASE [PartitionMaintTest] ADD FILE ( 
NAME = N'FACT_EMPTY', 
FILENAME = N'e:\PartitionMaintTest_FACT_EMPTY.ndf' , 
SIZE = 512KB , FILEGROWTH = 512KB ) TO FILEGROUP [FACT_EMPTY]
GO
ALTER DATABASE [PartitionMaintTest] ADD FILE ( 
NAME = N'FACT_2008_M10_01', 
FILENAME = N'e:\PartitionMaintTest_FACT_2008_M10_01.ndf' , 
SIZE = 512KB , FILEGROWTH = 512KB ) TO FILEGROUP [FACT_2008_M10]
USE [PartitionMaintTest]
GO
/****** Object:  PartitionFunction [pf_FACT_DATA_DATE]    Script Date: 11/10/2010 20:45:07 ******/
CREATE PARTITION FUNCTION [pf_FACT_DATA_DATE](datetime) AS RANGE RIGHT FOR 
VALUES ( N'2008-10-01')
GO
/****** Object:  PartitionScheme [ps_FACT_DATA_DATE]    Script Date: 11/10/2010 20:45:29 ******/
CREATE PARTITION SCHEME [ps_FACT_DATA_DATE] AS PARTITION [pf_FACT_DATA_DATE] TO (
[FACT_EMPTY], [FACT_2008_M10])
CREATE TABLE Orders (
OrderCloseDate datetime not null, 
OrderNum int not null, 
[Status] char(2) null,
CustomerID int not null)
ON ps_FACT_DATA_DATE (OrderCloseDate)
GO
CREATE INDEX IX_Orders_CustomerID
ON Orders (OrderCloseDate, CustomerID)
ON ps_FACT_DATA_DATE (OrderCloseDate)
GO
-- Insert Sample Data
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('09/10/2008', 1, 'AE', 12288)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('10/10/2008', 2, 'AE', 12288)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('11/10/2008', 3, 'AE', 12388)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('12/10/2008', 4, 'AE', 12488)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('01/10/2009', 5, 'AE', 12588)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('02/10/2009', 6, 'AE', 12688)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('03/10/2009', 7, 'AE', 12788)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('04/10/2009', 8, 'AE', 12888)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('05/10/2009', 9, 'AE', 12988)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('06/10/2009', 10, 'AE', 12088)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('07/10/2009', 11, 'AE', 11288)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('08/10/2009', 12, 'AE', 12288)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('09/10/2009', 13, 'AE', 13288)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('10/10/2009', 14, 'AE', 14288)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('11/10/2009', 15, 'AE', 15288)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('12/10/2009', 16, 'AE', 16288)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('01/10/2010', 17, 'AE', 17288)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('02/10/2010', 18, 'AE', 18288)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('03/10/2010', 19, 'AE', 19288)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('04/10/2010', 20, 'AE', 12288)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('05/10/2010', 21, 'AE', 32288)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('06/10/2010', 22, 'AE', 52288)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('07/10/2010', 23, 'AE', 62288)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('08/10/2010', 24, 'AE', 92288)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('09/10/2010', 25, 'AE', 12283)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('10/10/2010', 26, 'AE', 12284)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('11/10/2010', 27, 'AE', 12285)
GO
INSERT INTO Orders (OrderCloseDate, OrderNum, [Status], CustomerID) VALUES ('12/10/2010', 28, 'AE', 12286)
GO
SELECT
DB_NAME() AS 'DatabaseName'
,OBJECT_NAME(p.OBJECT_ID) AS 'TableName'
,p.index_id AS 'IndexId'
,CASE
WHEN p.index_id = 0 THEN 'HEAP'
ELSE i.name
END AS 'IndexName'
,p.partition_number AS 'PartitionNumber'
,prv_left.value AS 'LowerBoundary'
,prv_right.value AS 'UpperBoundary'
,ps.name as PartitionScheme
,pf.name as PartitionFunction
,CASE
WHEN fg.name IS NULL THEN ds.name
ELSE fg.name
END AS 'FileGroupName'
,CAST(p.used_page_count * 0.0078125 AS NUMERIC(18,2)) AS 'UsedPages_MB'
,CAST(p.in_row_data_page_count * 0.0078125 AS NUMERIC(18,2)) AS 'DataPages_MB'
,CAST(p.reserved_page_count * 0.0078125 AS NUMERIC(18,2)) AS 'ReservedPages_MB'
,CASE
WHEN p.index_id IN (0,1) THEN p.row_count
ELSE 0
END AS 'RowCount'
,CASE
WHEN p.index_id IN (0,1) THEN 'data'
ELSE 'index'
END 'Type'
FROM sys.dm_db_partition_stats p
INNER JOIN sys.indexes i
ON i.OBJECT_ID = p.OBJECT_ID AND i.index_id = p.index_id
INNER JOIN sys.data_spaces ds
ON ds.data_space_id = i.data_space_id
LEFT OUTER JOIN sys.partition_schemes ps
ON ps.data_space_id = i.data_space_id
LEFT OUTER JOIN sys.partition_functions pf
ON ps.function_id = pf.function_id
LEFT OUTER JOIN sys.destination_data_spaces dds
ON dds.partition_scheme_id = ps.data_space_id
AND dds.destination_id = p.partition_number
LEFT OUTER JOIN sys.filegroups fg
ON fg.data_space_id = dds.data_space_id
LEFT OUTER JOIN sys.partition_range_values prv_right
ON prv_right.function_id = ps.function_id
AND prv_right.boundary_id = p.partition_number
LEFT OUTER JOIN sys.partition_range_values prv_left
ON prv_left.function_id = ps.function_id
AND prv_left.boundary_id = p.partition_number - 1
WHERE
OBJECTPROPERTY(p.OBJECT_ID, 'IsMSShipped') = 0
AND p.index_id IN (0,1)
ALTER PARTITION FUNCTION [pf_FACT_DATA_DATE]()
MERGE RANGE('2008-10-01 00:00:00.000')
ALTER PARTITION SCHEME [ps_FACT_DATA_DATE] 
NEXT USED [FACT_EMPTY]
SELECT
DB_NAME() AS 'DatabaseName'
,OBJECT_NAME(p.OBJECT_ID) AS 'TableName'
,p.index_id AS 'IndexId'
,CASE
WHEN p.index_id = 0 THEN 'HEAP'
ELSE i.name
END AS 'IndexName'
,p.partition_number AS 'PartitionNumber'
,prv_left.value AS 'LowerBoundary'
,prv_right.value AS 'UpperBoundary'
,ps.name as PartitionScheme
,pf.name as PartitionFunction
,CASE
WHEN fg.name IS NULL THEN ds.name
ELSE fg.name
END AS 'FileGroupName'
,CAST(p.used_page_count * 0.0078125 AS NUMERIC(18,2)) AS 'UsedPages_MB'
,CAST(p.in_row_data_page_count * 0.0078125 AS NUMERIC(18,2)) AS 'DataPages_MB'
,CAST(p.reserved_page_count * 0.0078125 AS NUMERIC(18,2)) AS 'ReservedPages_MB'
,CASE
WHEN p.index_id IN (0,1) THEN p.row_count
ELSE 0
END AS 'RowCount'
,CASE
WHEN p.index_id IN (0,1) THEN 'data'
ELSE 'index'
END 'Type'
FROM sys.dm_db_partition_stats p
INNER JOIN sys.indexes i
ON i.OBJECT_ID = p.OBJECT_ID AND i.index_id = p.index_id
INNER JOIN sys.data_spaces ds
ON ds.data_space_id = i.data_space_id
LEFT OUTER JOIN sys.partition_schemes ps
ON ps.data_space_id = i.data_space_id
LEFT OUTER JOIN sys.partition_functions pf
ON ps.function_id = pf.function_id
LEFT OUTER JOIN sys.destination_data_spaces dds
ON dds.partition_scheme_id = ps.data_space_id
AND dds.destination_id = p.partition_number
LEFT OUTER JOIN sys.filegroups fg
ON fg.data_space_id = dds.data_space_id
LEFT OUTER JOIN sys.partition_range_values prv_right
ON prv_right.function_id = ps.function_id
AND prv_right.boundary_id = p.partition_number
LEFT OUTER JOIN sys.partition_range_values prv_left
ON prv_left.function_id = ps.function_id
AND prv_left.boundary_id = p.partition_number - 1
WHERE
OBJECTPROPERTY(p.OBJECT_ID, 'IsMSShipped') = 0
AND p.index_id IN (0,1)
GO
Best Regards,
Uri Dimant, SQL Server MVP
http://sqlblog.com/blogs/uri_dimant/

Similar Messages

  • How to delete data from a partitioned table

    Hi all,
    I am very new to partitioning concepts in Oracle.
    My question is: how do I delete data from a partitioned table? Will the query below work?
    delete from table1 partition (P_2008_1212)
    We have defined range partitioning ...
    Or help me understand how to delete data from a partitioned table.
    Thanks
    Sree

    874823 wrote:
    delete from table1 partition (P_2008_1212)
    This approach is wrong. As Andre pointed out, this is not how partitioned tables should be used.
    Oracle supports different structures for data and indexes. A table can be a hash table or index organised table. It can have B+tree index. It can have bitmap indexes. It can be partitioned. Etc.
    How the table implements its structure is a physical design consideration.
    Application code should only deal with the logical data structure. How that data structure is physically implemented has no bearing on the application. Does your application need to know what the indexes are, and the names of the indexes, in order to use a table? Obviously not. So why then does your application need to know that the table is partitioned?
    When your application code starts referring directly to physical partitions, it needs to know HOW the table is partitioned. It needs to know WHAT partitions to use. It needs to know the names of the partitions. Etc.
    And why? All this means is increased complexity in application code as this code now needs to know and understand the physical data structure. This app code is now more complex, has more moving parts, will have more bugs, and will be more complex to maintain.
    Oracle can take an application's SQL and determine, based on the predicates of that SQL, which partitions to use and not use for executing it. All done totally transparently. The app does not need to know that the table is even partitioned.
    This is a crucial concept to understand and get right.
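
    To illustrate the pruning point, a minimal sketch (the sale_date column and date range are hypothetical): a plain DELETE with a predicate on the partition key lets Oracle prune to the relevant partition without the SQL naming it:

    -- No PARTITION clause needed; the optimizer prunes to the partition(s)
    -- covering the predicate (column name assumed for illustration):
    DELETE FROM table1
    WHERE sale_date >= DATE '2008-12-01'
      AND sale_date <  DATE '2009-01-01';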

  • I am getting an error when resizing the Mac HD. I had a separate partition for Windows, but have deleted it and want to add the empty 50GB partition to my Mac HD.

    I am getting an error when resizing the Mac HD. I had a separate partition for Windows, but have deleted it and want to add the empty 50GB partition to my Mac HD.

    SparkchaserEd wrote:
    I am getting an error when resizing the Mac HD.
    What does this error say?

  • How to delete table partitions

    Hi all,
    I want to know how I can delete table partitions. I have a partitioned table in schema A, owned by user A, but I created the table partitions in schema B, owned by user B. Now, when I ask for the partitions from schema B, it says the table doesn't exist, and when I tried to delete the partitions from schema A, it says the partition doesn't exist. What should I do?
    Regards,
    AA.

    Can you give the statements you tried (and as which user)?
    I couldn't follow your actions, sorry.
    Martin

  • Delete first empty row in a table

    Hi,
    Can you please provide the script for the below requirement.
    I need to delete the first empty row in all the tables. (The tables have multiple empty rows; I need to delete only the first empty row.)
    Regards,
    Velu

    @Velu – the following blog post by Jongware might get you started:
    Tackling Tables through Scripting
    by: Jongware | April 8, 2013
    http://indesignsecrets.com/tackling-tables-through-scripting.php
    Document Object (DOM) documentation:
    Indesign JavaScript Help
    InDesign ExtendScript API (10.0)
    Table Row specifics:
    Adobe InDesign CS6 (8.0) Object Model JS: Row
    InDesign ExtendScript API (10.0)
    Uwe

  • Fact and dimension table partition

    My team is implementing a new data warehouse. I would like to know when we should plan to partition the fact and dimension tables: before the data comes in, or after?

    Hi,
    It is recommended to partition the fact table (where we will have huge data volumes). Automate the partitioning so that each day it creates a new partition to hold the latest data (splitting the previous partition in two). Best practice is to partition on transaction timestamps, so load the incremental data into an empty staging table (Table_IN) and then switch that data into the main table (Table). Make sure both tables (Table and Table_IN) are on the same filegroup; a sketch of this pattern follows.
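
    A minimal sketch of that daily pattern, with all object names hypothetical:

    -- Make a filegroup available, then create the new (empty) partition:
    ALTER PARTITION SCHEME ps_FactDate NEXT USED FG_Current;
    ALTER PARTITION FUNCTION pf_FactDate() SPLIT RANGE ('2012-06-02');
    -- Load the day's rows into the staging table Fact_IN (same filegroup and
    -- structure, plus a CHECK constraint matching the partition range), then
    -- switch it into the matching partition of the main table:
    ALTER TABLE Fact_IN SWITCH TO Fact PARTITION $PARTITION.pf_FactDate('2012-06-01');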
    Refer below content for detailed info
    Designing and Administrating Partitions in SQL Server 2012
    A popular method of better managing large and active tables and indexes is the use of partitioning. Partitioning is a feature for segregating I/O workload within a SQL Server database so that I/O can be better balanced against the available I/O subsystems while providing better user response time, lower I/O latency, and faster backups and recovery. By partitioning tables and indexes across multiple filegroups, data retrieval and management is much quicker because only subsets of the data are used, while ensuring that the integrity of the database as a whole remains intact.
    Tip
    Partitioning is typically used for administrative or certain I/O performance scenarios. However, partitioning can also speed up some queries by enabling lock escalation to a single partition, rather than to an entire table. You must allow lock escalation to move up to the partition level by setting it with either the Lock Escalation option of the Database Options page in SSMS or the LOCK_ESCALATION option of the ALTER TABLE statement.
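
    For example, partition-level escalation can be allowed on the Orders table used earlier in this thread (AUTO permits escalation to the partition rather than the whole table when the table is partitioned):

    ALTER TABLE Orders SET (LOCK_ESCALATION = AUTO);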
    After a table or index is partitioned, data is stored horizontally across multiple filegroups, so groups of data are mapped to individual partitions. Typical
    scenarios for partitioning include large tables that become very difficult to manage, tables that are suffering performance degradation because of excessive I/O or blocking locks, table-centric maintenance processes that exceed the available time for maintenance,
    and moving historical data from the active portion of a table to a partition with less activity.
    Partitioning tables and indexes warrants a bit of planning before putting them into production. The usual approach to partitioning a table or index follows these
    steps:
    1. Create the filegroup(s) and file(s) used to hold the partitions defined by the partitioning scheme.
    2. Create a partition function to map the rows of the table or index to specific partitions based on the values in a specified column. A very common partitioning function is based on the creation date of the record.
    3. Create a partitioning scheme to map the partitions of the partitioned table to the specified filegroup(s) and, thereby, to specific locations on the Windows file system.
    4. Create the table or index (or ALTER an existing table or index) by specifying the partition scheme as the storage location for the partitioned object. (See the sketch after this list.)
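
    As a compact orientation, here is a minimal sketch of the four steps; all names are hypothetical, and the scripts earlier in this thread show a fuller worked example:

    ALTER DATABASE SalesDB ADD FILEGROUP FG_2012;           -- step 1: filegroup and file
    ALTER DATABASE SalesDB ADD FILE
        (NAME = 'SalesDB_FG_2012', FILENAME = 'e:\SalesDB_FG_2012.ndf')
        TO FILEGROUP FG_2012;
    CREATE PARTITION FUNCTION pf_SaleDate (datetime)        -- step 2: partition function
        AS RANGE RIGHT FOR VALUES ('2012-01-01');
    CREATE PARTITION SCHEME ps_SaleDate                     -- step 3: partition scheme
        AS PARTITION pf_SaleDate TO ([PRIMARY], FG_2012);
    CREATE TABLE Sales                                      -- step 4: table on the scheme
        (SaleID int NOT NULL, SaleDate datetime NOT NULL)
        ON ps_SaleDate (SaleDate);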
    Although Transact-SQL commands are available to perform every step described earlier, the Create Partition Wizard makes the entire process quick and easy through
    an intuitive point-and-click interface. The next section provides an overview of using the Create Partition Wizard in SQL Server 2012, and an example later in this section shows the Transact-SQL commands.
    Leveraging the Create Partition Wizard to Create Table and Index Partitions
    The Create Partition Wizard can be used to divide data in large tables across multiple filegroups to increase performance. It can be invoked by right-clicking any table or index, selecting Storage, and then selecting Create Partition. The first step is to identify which columns to partition by reviewing all the columns available in the Available Partitioning Columns section located on the Select a Partitioning Column dialog box, as displayed in Figure 3.13. This screen also includes some additional options.
    Figure 3.13. Selecting a partitioning column.
    The next screen is called Select a Partition Function. This page is used for specifying the partition function by which the data will be partitioned; the options include using an existing partition function or creating a new one. The subsequent page is called New Partition Scheme. Here a DBA maps the rows of the tables being partitioned to the desired filegroups; either an existing partition scheme can be used or a new one created. The final screen, the Map Partitions page, is used for doing the actual mapping: specify the filegroup to be used for each partition, and then enter a range for the values of the partitions.
    Note
    By opening the Set Boundary Values dialog box, a DBA can set boundary values based on dates (for example, partition everything in a column after a specific
    date). The data types are based on dates.
    Designing table and index partitions is a DBA task that typically requires a joint effort with the database development team. The DBA must have a strong understanding
    of the database, tables, and columns to make the correct choices for partitioning. For more information on partitioning, review Books Online.
    Enhancements to Partitioning in SQL Server 2012
    SQL Server 2012 now supports as many as 15,000 partitions. When using more than 1,000 partitions, Microsoft recommends that the instance of SQL Server have at least 16 GB of available memory. This recommendation particularly applies to partitioned indexes, especially those that are not aligned with the base table or with the clustered index of the table. Other Data Manipulation Language (DML) and Data Definition Language (DDL) statements may also run short of memory when processing a large number of partitions.
    Certain DBCC commands may take longer to execute when processing a large number of partitions. On the other hand, a few DBCC commands can be scoped to the partition
    level and, if so, can be used to perform their function on a subset of data in the partitioned table.
    Queries may also benefit from a new query engine enhancement called partition elimination. SQL Server uses partition elimination automatically if it is available. Here's how it works. Assume a table has four partitions, with all the data for customers whose names begin with R, S, or T in the third partition. If a query's WHERE clause filters on customer name looking for 'System%', the query engine knows that it needs only partition three to answer the request. Thus, it might greatly reduce I/O for that query. On the other hand, some queries might take longer if there are more than 1,000 partitions and the query is not able to perform partition elimination.
    Finally, SQL Server 2012 introduces some changes and improvements to the algorithms used to calculate partitioned index statistics. Primarily, SQL Server 2012 samples rows in a partitioned index when it is created or rebuilt, rather than scanning all available rows. This may sometimes result in somewhat different query behavior compared to the same queries running on earlier versions of SQL Server.
    Administrating Data Using Partition Switching
    Partitioning is useful to access and manage a subset of data while losing none of the integrity of the entire data set. There is one limitation, though. When
    a partition is created on an existing table, new data is added to a specific partition or to the default partition if none is specified. That means the default partition might grow unwieldy if it is left unmanaged. (This concept is similar to how a clustered
    index needs to be rebuilt from time to time to reestablish its fill factor setting.)
    Switching partitions is a fast operation because no physical movement of data takes place. Instead, only the metadata pointers to the physical data are altered.
    You can alter partitions using SQL Server Management Studio or with the ALTER TABLE...SWITCH
    Transact-SQL statement. Both options enable you to ensure partitions are
    well maintained. For example, you can transfer subsets of data between partitions, move tables between partitions, or combine partitions together. Because the ALTER TABLE...SWITCH statement
    does not actually move the data, a few prerequisites must be in place:
    • Partitions must use the same column when switching between two partitions.
    • The source and target table must exist prior to the switch and must be on the same filegroup, along with their corresponding indexes,
    index partitions, and indexed view partitions.
    • The target partition must exist prior to the switch, and it must be empty, whether adding a table to an existing partitioned table
    or moving a partition from one table to another. The same holds true when moving a partitioned table to a nonpartitioned table structure.
    • The source and target tables must have the same columns in identical order with the same names, data types, and data type attributes
    (length, precision, scale, and nullability). Computed columns must have identical syntax, as well as primary key constraints. The tables must also have the same settings for ANSI_NULLS and QUOTED_IDENTIFIER properties.
    Clustered and nonclustered indexes must be identical. ROWGUID properties
    and XML schemas must match. Finally, settings for in-row data storage must also be the same.
    • The source and target tables must have matching nullability on the partitioning column. Although both NULL and NOT NULL are supported, NOT NULL is strongly recommended.
    Likewise, the ALTER TABLE...SWITCH statement
    will not work under certain circumstances:
    • Full-text indexes, XML indexes, and old-fashioned SQL Server rules are not allowed (though CHECK constraints
    are allowed).
    • Tables in a merge replication scheme are not allowed. Tables in a transactional replication scheme are allowed with special caveats.
    Triggers are allowed on tables but must not fire during the switch.
    • Indexes on the source and target table must reside on the same partition as the tables themselves.
    • Indexed views make partition switching difficult and have a lot of extra rules about how and when they can be switched. Refer to
    the SQL Server Books Online if you want to perform partition switching on tables containing indexed views.
    • Referential integrity can impact the use of partition switching. First, foreign keys on other tables cannot reference the source
    table. If the source table holds the primary key, it cannot have a primary or foreign key relationship with the target table. If the target table holds the foreign key, it cannot have a primary or foreign key relationship with the source table.
    In summary, simple tables can easily accommodate partition switching. The more complexity a source or target table exhibits, the more likely that careful planning
    and extra work will be required to even make partition switching possible, let alone efficient.
    Here’s an example where we create a partitioned table using a previously created partition scheme, called Date_Range_PartScheme1.
    We then create a new, nonpartitioned table identical to the partitioned table residing on the same filegroup. We finish up switching the data from the partitioned table into the nonpartitioned table:
    CREATE TABLE TransactionHistory_Partn1 (Xn_Hst_ID int, Xn_Type char(10))
    ON Date_Range_PartScheme1 (Xn_Hst_ID);
    GO
    CREATE TABLE TransactionHistory_No_Partn (Xn_Hst_ID int, Xn_Type char(10))
    ON main_filegroup;
    GO
    ALTER TABLE TransactionHistory_Partn1 SWITCH PARTITION 1 TO TransactionHistory_No_Partn;
    GO
    The next section shows how to use a more sophisticated, but very popular, approach to partition switching called a sliding
    window partition.
    Example and Best Practices for Managing Sliding Window Partitions
    Assume that our AdventureWorks business is booming. The sales staff, and by extension the AdventureWorks2012 database, is very busy. We noticed over time that
    the TransactionHistory table is very active as sales transactions are first entered and are still very active over their first month in the database. But the older the transactions are, the less activity they see. Consequently, we’d like to automatically group
    transactions into four partitions per year, basically containing one quarter of the year’s data each, in a rolling partitioning. Any transaction older than one year will be purged or archived.
    The answer to a scenario like the preceding one is called a sliding window partition because
    we are constantly loading new data in and sliding old data over, eventually to be purged or archived. Before you begin, you must choose either a LEFT partition function window or a RIGHT partition function window:
    1. How data is handled varies according to the choice of LEFT or RIGHT partition function window:
    • With a LEFT strategy, partition1 holds the oldest data (Q4 data), partition2 holds data that is 6 to 9 months old (Q3), partition3 holds data that is 3 to 6 months old (Q2), and partition4 holds recent data less than 3 months old.
    • With a RIGHT strategy, partition4 holds the oldest data (Q4), partition3 holds Q3 data, partition2 holds Q2 data, and partition1 holds recent data.
    • Following the best practice, make sure there are empty partitions on both the leading edge (partition0) and trailing edge (partition5) of the partition range.
    • RIGHT range functions usually make more sense to most people because it is natural to start ranges at their lowest value and work upward from there.
    2. Assuming that a RIGHT partition function window is used, we first use the SPLIT subclause of the ALTER PARTITION FUNCTION statement to split empty partition5 into two empty partitions, 5 and 6.
    3. We use the SWITCH subclause of ALTER TABLE to switch out partition4 to a staging table for archiving, or simply to drop and purge the data. Partition4 is now empty.
    4. We can then use MERGE to combine the empty partitions 4 and 5, so that we're back to the same number of partitions as when we started. This way, partition3 becomes the new partition4, partition2 becomes the new partition3, and partition1 becomes the new partition2.
    5. We can use SWITCH to push the new quarter's data into the spot of partition1. (A sketch of steps 2 through 4 follows.)
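
    A sketch of steps 2 through 4 under stated assumptions: a RANGE RIGHT function pf_Quarters on scheme ps_Quarters, the oldest quarter sitting in partition 2 behind an empty leading partition, and a staging table TransactionHistoryArchive on the same filegroup (all names hypothetical):

    ALTER PARTITION SCHEME ps_Quarters NEXT USED FG_Staging;            -- filegroup for the split
    ALTER PARTITION FUNCTION pf_Quarters() SPLIT RANGE ('2013-01-01');  -- step 2: split the empty edge
    ALTER TABLE TransactionHistory SWITCH PARTITION 2                   -- step 3: switch out the oldest quarter
        TO TransactionHistoryArchive;
    ALTER PARTITION FUNCTION pf_Quarters() MERGE RANGE ('2012-01-01');  -- step 4: merge the now-empty pair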
    Tip
    Use the $PARTITION system function to determine where a partition function places values within a range of partitions.
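
    For instance, against the pf_FACT_DATA_DATE function created earlier in this thread (with RANGE RIGHT, the boundary value belongs to the partition on its right, so this returns 2):

    SELECT $PARTITION.pf_FACT_DATA_DATE('2008-10-01') AS PartitionNumber;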
    Some best practices to consider for using a sliding window partition include the following:
    • Load newest data into a heap, and then add indexes after the load is finished. Delete oldest data or, when working with very large
    data sets, drop the partition with the oldest data.
    • Keep an empty staging partition at the leftmost and rightmost ends of the partition range to ensure that partition splits (when loading new data) and merges (after unloading old data) do not cause data movement.
    • Do not split or merge a partition already populated with data because this can cause severe locking and explosive log growth.
    • Create the load staging table in the same filegroup as the partition you are loading.
    • Create the unload staging table in the same filegroup as the partition you are deleting.
    • Don't load a partition until its range boundary is met. For example, don't create and load a partition meant to hold data that is one to two months old before the current data has aged one month. Instead, continue to allow the latest partition to accumulate data until the data is ready for a new, full partition.
    • Unload one partition at a time.
    • The ALTER TABLE...SWITCH statement
    issues a schema lock on the entire table. Keep this in mind if regular transactional activity is still going on while a table is being partitioned.
    Thanks, Shiven

  • Sliding Window Table Partitioning Problems with RANGE RIGHT, SPLIT, MERGE using Multiple File Groups

    There is misleading information in two system views (sys.data_spaces & sys.destination_data_spaces) about the physical location of data after a partitioning MERGE and before an INDEX REBUILD operation on a partitioned table. In SQL Server 2012 SP1 CU6, the script below (SQLCMD mode; set the DataDrive & LogDrive variables for the runtime environment) will create a test database with filegroups and files to support a partitioned table. The partition function and scheme spread the test data across 4 filegroups; an empty partition, filegroup, and file are maintained at the start and end of the range. A problem occurs after the SWITCH and MERGE RANGE operations: the views sys.data_spaces & sys.destination_data_spaces show the logical, not the physical, location of data.
    --=================================================================================
    -- PartitionLabSetup_RangeRight.sql
    -- 001. Create test database
    -- 002. Add file groups and files
    -- 003. Create partition function and schema
    -- 004. Create and populate a test table
    --=================================================================================
    USE [master]
    GO
    -- 001 - Create Test Database
    :SETVAR DataDrive "D:\SQL\Data\"
    :SETVAR LogDrive "D:\SQL\Logs\"
    :SETVAR DatabaseName "workspace"
    :SETVAR TableName "TestTable"
    -- Drop if exists and create Database
    IF DATABASEPROPERTYEX(N'$(databasename)','Status') IS NOT NULL
    BEGIN
    ALTER DATABASE $(DatabaseName) SET SINGLE_USER WITH ROLLBACK IMMEDIATE
    DROP DATABASE $(DatabaseName)
    END
    CREATE DATABASE $(DatabaseName)
    ON
    ( NAME = $(DatabaseName)_data,
    FILENAME = N'$(DataDrive)$(DatabaseName)_data.mdf',
    SIZE = 10,
    MAXSIZE = 500,
    FILEGROWTH = 5 )
    LOG ON
    ( NAME = $(DatabaseName)_log,
    FILENAME = N'$(LogDrive)$(DatabaseName).ldf',
    SIZE = 5MB,
    MAXSIZE = 5000MB,
    FILEGROWTH = 5MB ) ;
    GO
    -- 002. Add file groups and files
    --:SETVAR DatabaseName "workspace"
    --:SETVAR TableName "TestTable"
    --:SETVAR DataDrive "D:\SQL\Data\"
    --:SETVAR LogDrive "D:\SQL\Logs\"
    DECLARE @nSQL NVARCHAR(2000) ;
    DECLARE @x INT = 1;
    WHILE @x <= 6
    BEGIN
    SELECT @nSQL =
    'ALTER DATABASE $(DatabaseName)
    ADD FILEGROUP $(TableName)_fg' + RTRIM(CAST(@x AS CHAR(5))) + ';
    ALTER DATABASE $(DatabaseName)
    ADD FILE
    NAME= ''$(TableName)_f' + CAST(@x AS CHAR(5)) + ''',
    FILENAME = ''$(DataDrive)\$(TableName)_f' + RTRIM(CAST(@x AS CHAR(5))) + '.ndf''
    TO FILEGROUP $(TableName)_fg' + RTRIM(CAST(@x AS CHAR(5))) + ';'
    EXEC sp_executeSQL @nSQL;
    SET @x = @x + 1;
    END
    -- 003. Create partition function and schema
    --:SETVAR TableName "TestTable"
    --:SETVAR DatabaseName "workspace"
    USE $(DatabaseName);
    CREATE PARTITION FUNCTION $(TableName)_func (int)
    AS RANGE RIGHT FOR VALUES
    (
    0,
    15,
    30,
    45,
    60
    );
    CREATE PARTITION SCHEME $(TableName)_scheme
    AS
    PARTITION $(TableName)_func
    TO
    (
    $(TableName)_fg1,
    $(TableName)_fg2,
    $(TableName)_fg3,
    $(TableName)_fg4,
    $(TableName)_fg5,
    $(TableName)_fg6
    );
    -- Create TestTable
    --:SETVAR TableName "TestTable"
    --:SETVAR BackupDrive "D:\SQL\Backups\"
    --:SETVAR DatabaseName "workspace"
    CREATE TABLE [dbo].$(TableName)(
    [Partition_PK] [int] NOT NULL,
    [GUID_PK] [uniqueidentifier] NOT NULL,
    [CreateDate] [datetime] NULL,
    [CreateServer] [nvarchar](50) NULL,
    [RandomNbr] [int] NULL,
    CONSTRAINT [PK_$(TableName)] PRIMARY KEY CLUSTERED
    (
    [Partition_PK] ASC,
    [GUID_PK] ASC
    )
    ) ON $(TableName)_scheme(Partition_PK)
    ALTER TABLE [dbo].$(TableName) ADD CONSTRAINT [DF_$(TableName)_GUID_PK] DEFAULT (newid()) FOR [GUID_PK]
    ALTER TABLE [dbo].$(TableName) ADD CONSTRAINT [DF_$(TableName)_CreateDate] DEFAULT (getdate()) FOR [CreateDate]
    ALTER TABLE [dbo].$(TableName) ADD CONSTRAINT [DF_$(TableName)_CreateServer] DEFAULT (@@servername) FOR [CreateServer]
    -- 004. Create and populate a test table
    -- Load TestTable Data - Seconds 0-59 are used as the Partitioning Key
    --:SETVAR TableName "TestTable"
    SET NOCOUNT ON;
    DECLARE @Now DATETIME = GETDATE()
    WHILE @Now > DATEADD(minute,-1,GETDATE())
    BEGIN
    INSERT INTO [dbo].$(TableName)
    ([Partition_PK]
    ,[RandomNbr])
    VALUES
    (
    DATEPART(second,GETDATE())
    ,ROUND((RAND() * 100),0)
    );
    END
    -- Confirm table partitioning - http://lextonr.wordpress.com/tag/sys-destination_data_spaces/
    SELECT
    N'DatabaseName' = DB_NAME()
    , N'SchemaName' = s.name
    , N'TableName' = o.name
    , N'IndexName' = i.name
    , N'IndexType' = i.type_desc
    , N'PartitionScheme' = ps.name
    , N'DataSpaceName' = ds.name
    , N'DataSpaceType' = ds.type_desc
    , N'PartitionFunction' = pf.name
    , N'PartitionNumber' = dds.destination_id
    , N'BoundaryValue' = prv.value
    , N'RightBoundary' = pf.boundary_value_on_right
    , N'PartitionFileGroup' = ds2.name
    , N'RowsOfData' = p.[rows]
    FROM
    sys.objects AS o
    INNER JOIN sys.schemas AS s
    ON o.[schema_id] = s.[schema_id]
    INNER JOIN sys.partitions AS p
    ON o.[object_id] = p.[object_id]
    INNER JOIN sys.indexes AS i
    ON p.[object_id] = i.[object_id]
    AND p.index_id = i.index_id
    INNER JOIN sys.data_spaces AS ds
    ON i.data_space_id = ds.data_space_id
    INNER JOIN sys.partition_schemes AS ps
    ON ds.data_space_id = ps.data_space_id
    INNER JOIN sys.partition_functions AS pf
    ON ps.function_id = pf.function_id
    LEFT OUTER JOIN sys.partition_range_values AS prv
    ON pf.function_id = prv.function_id
    AND p.partition_number = prv.boundary_id
    LEFT OUTER JOIN sys.destination_data_spaces AS dds
    ON ps.data_space_id = dds.partition_scheme_id
    AND p.partition_number = dds.destination_id
    LEFT OUTER JOIN sys.data_spaces AS ds2
    ON dds.data_space_id = ds2.data_space_id
    ORDER BY
    DatabaseName
    ,SchemaName
    ,TableName
    ,IndexName
    ,PartitionNumber
    --=================================================================================
    -- SECTION 2 - SWITCH OUT
    -- 001 - Create TestTableOut
    -- 002 - Switch out partition in range 0-14
    -- 003 - Merge range 0 -29
    -- 001. TestTableOut
    :SETVAR TableName "TestTable"
    IF OBJECT_ID('dbo.$(TableName)Out') IS NOT NULL
    DROP TABLE [dbo].[$(TableName)Out]
    CREATE TABLE [dbo].[$(TableName)Out](
    [Partition_PK] [int] NOT NULL,
    [GUID_PK] [uniqueidentifier] NOT NULL,
    [CreateDate] [datetime] NULL,
    [CreateServer] [nvarchar](50) NULL,
    [RandomNbr] [int] NULL,
    CONSTRAINT [PK_$(TableName)Out] PRIMARY KEY CLUSTERED
    (
    [Partition_PK] ASC,
    [GUID_PK] ASC
    )
    ) ON $(TableName)_fg2;
    GO
    -- 002 - Switch out partition in range 0-14
    --:SETVAR TableName "TestTable"
    ALTER TABLE dbo.$(TableName)
    SWITCH PARTITION 2 TO dbo.$(TableName)Out;
    -- 003 - Merge range 0 - 29
    --:SETVAR TableName "TestTable"
    ALTER PARTITION FUNCTION $(TableName)_func()
    MERGE RANGE (15);
    -- Confirm table partitioning
    -- Original source of this query - http://lextonr.wordpress.com/tag/sys-destination_data_spaces/
    SELECT
    N'DatabaseName' = DB_NAME()
    , N'SchemaName' = s.name
    , N'TableName' = o.name
    , N'IndexName' = i.name
    , N'IndexType' = i.type_desc
    , N'PartitionScheme' = ps.name
    , N'DataSpaceName' = ds.name
    , N'DataSpaceType' = ds.type_desc
    , N'PartitionFunction' = pf.name
    , N'PartitionNumber' = dds.destination_id
    , N'BoundaryValue' = prv.value
    , N'RightBoundary' = pf.boundary_value_on_right
    , N'PartitionFileGroup' = ds2.name
    , N'RowsOfData' = p.[rows]
    FROM
    sys.objects AS o
    INNER JOIN sys.schemas AS s
    ON o.[schema_id] = s.[schema_id]
    INNER JOIN sys.partitions AS p
    ON o.[object_id] = p.[object_id]
    INNER JOIN sys.indexes AS i
    ON p.[object_id] = i.[object_id]
    AND p.index_id = i.index_id
    INNER JOIN sys.data_spaces AS ds
    ON i.data_space_id = ds.data_space_id
    INNER JOIN sys.partition_schemes AS ps
    ON ds.data_space_id = ps.data_space_id
    INNER JOIN sys.partition_functions AS pf
    ON ps.function_id = pf.function_id
    LEFT OUTER JOIN sys.partition_range_values AS prv
    ON pf.function_id = prv.function_id
    AND p.partition_number = prv.boundary_id
    LEFT OUTER JOIN sys.destination_data_spaces AS dds
    ON ps.data_space_id = dds.partition_scheme_id
    AND p.partition_number = dds.destination_id
    LEFT OUTER JOIN sys.data_spaces AS ds2
    ON dds.data_space_id = ds2.data_space_id
    ORDER BY
    DatabaseName
    ,SchemaName
    ,TableName
    ,IndexName
    ,PartitionNumber  
    The table below shows the results of the ‘Confirm Table Partitioning’ query, before and after the MERGE.
    The T-SQL code below illustrates the problem.
    -- PartitionLab_RangeRight
    USE workspace;
    DROP TABLE dbo.TestTableOut;
    USE master;
    ALTER DATABASE workspace
    REMOVE FILE TestTable_f3 ;
    -- ERROR
    --Msg 5042, Level 16, State 1, Line 1
    --The file 'TestTable_f3 ' cannot be removed because it is not empty.
    ALTER DATABASE workspace
    REMOVE FILE TestTable_f2 ;
    -- Works surprisingly!!
    use workspace;
    ALTER INDEX [PK_TestTable] ON [dbo].[TestTable] REBUILD PARTITION = 2;
    --Msg 622, Level 16, State 3, Line 2
    --The filegroup "TestTable_fg2" has no files assigned to it. Tables, indexes, text columns, ntext columns, and image columns cannot be populated on this filegroup until a file is added.
    --The statement has been terminated.
    If you run ALTER INDEX REBUILD before trying to remove files from File Group 3, it works. Rerun the database setup script then the code below.
    -- RANGE RIGHT
    -- Rerun PartitionLabSetup_RangeRight.sql before the code below
    USE workspace;
    DROP TABLE dbo.TestTableOut;
    ALTER INDEX [PK_TestTable] ON [dbo].[TestTable] REBUILD PARTITION = 2;
    USE master;
    ALTER DATABASE workspace
    REMOVE FILE TestTable_f3;
    -- Works as expected!!
    The file in File Group 2 appears to contain data, but it can be dropped. Although the system views report the data as being in File Group 2, it still physically resides in File Group 3 and isn't moved until the index is rebuilt. The RANGE RIGHT function means the left filegroup (File Group 2) is retained when merging ranges.
    RANGE LEFT would have retained the data in File Group 3, where it already resided; no INDEX REBUILD would be necessary to effectively complete the MERGE operation. The script below implements the same partitioning strategy (data distribution between partitions) on the test table but uses different boundary definitions and RANGE LEFT.
    --=================================================================================
    -- PartitionLabSetup_RangeLeft.sql
    -- 001. Create test database
    -- 002. Add file groups and files
    -- 003. Create partition function and schema
    -- 004. Create and populate a test table
    --=================================================================================
    USE [master]
    GO
    -- 001 - Create Test Database
    :SETVAR DataDrive "D:\SQL\Data\"
    :SETVAR LogDrive "D:\SQL\Logs\"
    :SETVAR DatabaseName "workspace"
    :SETVAR TableName "TestTable"
    -- Drop if exists and create Database
    IF DATABASEPROPERTYEX(N'$(databasename)','Status') IS NOT NULL
    BEGIN
    ALTER DATABASE $(DatabaseName) SET SINGLE_USER WITH ROLLBACK IMMEDIATE
    DROP DATABASE $(DatabaseName)
    END
    CREATE DATABASE $(DatabaseName)
    ON
    ( NAME = $(DatabaseName)_data,
    FILENAME = N'$(DataDrive)$(DatabaseName)_data.mdf',
    SIZE = 10,
    MAXSIZE = 500,
    FILEGROWTH = 5 )
    LOG ON
    ( NAME = $(DatabaseName)_log,
    FILENAME = N'$(LogDrive)$(DatabaseName).ldf',
    SIZE = 5MB,
    MAXSIZE = 5000MB,
    FILEGROWTH = 5MB ) ;
    GO
    -- 002. Add file groups and files
    --:SETVAR DatabaseName "workspace"
    --:SETVAR TableName "TestTable"
    --:SETVAR DataDrive "D:\SQL\Data\"
    --:SETVAR LogDrive "D:\SQL\Logs\"
    DECLARE @nSQL NVARCHAR(2000) ;
    DECLARE @x INT = 1;
    WHILE @x <= 6
    BEGIN
    SELECT @nSQL =
    'ALTER DATABASE $(DatabaseName)
    ADD FILEGROUP $(TableName)_fg' + RTRIM(CAST(@x AS CHAR(5))) + ';
    ALTER DATABASE $(DatabaseName)
    ADD FILE
    NAME= ''$(TableName)_f' + CAST(@x AS CHAR(5)) + ''',
    FILENAME = ''$(DataDrive)\$(TableName)_f' + RTRIM(CAST(@x AS CHAR(5))) + '.ndf''
    TO FILEGROUP $(TableName)_fg' + RTRIM(CAST(@x AS CHAR(5))) + ';'
    EXEC sp_executeSQL @nSQL;
    SET @x = @x + 1;
    END
    -- 003. Create partition function and schema
    --:SETVAR TableName "TestTable"
    --:SETVAR DatabaseName "workspace"
    USE $(DatabaseName);
    CREATE PARTITION FUNCTION $(TableName)_func (int)
    AS RANGE LEFT FOR VALUES
    (
    -1,
    14,
    29,
    44,
    59
    );
    CREATE PARTITION SCHEME $(TableName)_scheme
    AS
    PARTITION $(TableName)_func
    TO
    (
    $(TableName)_fg1,
    $(TableName)_fg2,
    $(TableName)_fg3,
    $(TableName)_fg4,
    $(TableName)_fg5,
    $(TableName)_fg6
    );
    -- Create TestTable
    --:SETVAR TableName "TestTable"
    --:SETVAR BackupDrive "D:\SQL\Backups\"
    --:SETVAR DatabaseName "workspace"
    CREATE TABLE [dbo].$(TableName)(
    [Partition_PK] [int] NOT NULL,
    [GUID_PK] [uniqueidentifier] NOT NULL,
    [CreateDate] [datetime] NULL,
    [CreateServer] [nvarchar](50) NULL,
    [RandomNbr] [int] NULL,
    CONSTRAINT [PK_$(TableName)] PRIMARY KEY CLUSTERED
    (
    [Partition_PK] ASC,
    [GUID_PK] ASC
    )
    ) ON $(TableName)_scheme(Partition_PK)
    ALTER TABLE [dbo].$(TableName) ADD CONSTRAINT [DF_$(TableName)_GUID_PK] DEFAULT (newid()) FOR [GUID_PK]
    ALTER TABLE [dbo].$(TableName) ADD CONSTRAINT [DF_$(TableName)_CreateDate] DEFAULT (getdate()) FOR [CreateDate]
    ALTER TABLE [dbo].$(TableName) ADD CONSTRAINT [DF_$(TableName)_CreateServer] DEFAULT (@@servername) FOR [CreateServer]
    -- 004. Create and populate a test table
    -- Load TestTable Data - Seconds 0-59 are used as the Partitioning Key
    --:SETVAR TableName "TestTable"
    SET NOCOUNT ON;
    DECLARE @Now DATETIME = GETDATE()
    WHILE @Now > DATEADD(minute,-1,GETDATE())
    BEGIN
    INSERT INTO [dbo].$(TableName)
    ([Partition_PK]
    ,[RandomNbr])
    VALUES
    (
    DATEPART(second,GETDATE())
    ,ROUND((RAND() * 100),0)
    );
    END
    -- Confirm table partitioning - http://lextonr.wordpress.com/tag/sys-destination_data_spaces/
    SELECT
    N'DatabaseName' = DB_NAME()
    , N'SchemaName' = s.name
    , N'TableName' = o.name
    , N'IndexName' = i.name
    , N'IndexType' = i.type_desc
    , N'PartitionScheme' = ps.name
    , N'DataSpaceName' = ds.name
    , N'DataSpaceType' = ds.type_desc
    , N'PartitionFunction' = pf.name
    , N'PartitionNumber' = dds.destination_id
    , N'BoundaryValue' = prv.value
    , N'RightBoundary' = pf.boundary_value_on_right
    , N'PartitionFileGroup' = ds2.name
    , N'RowsOfData' = p.[rows]
    FROM
    sys.objects AS o
    INNER JOIN sys.schemas AS s
    ON o.[schema_id] = s.[schema_id]
    INNER JOIN sys.partitions AS p
    ON o.[object_id] = p.[object_id]
    INNER JOIN sys.indexes AS i
    ON p.[object_id] = i.[object_id]
    AND p.index_id = i.index_id
    INNER JOIN sys.data_spaces AS ds
    ON i.data_space_id = ds.data_space_id
    INNER JOIN sys.partition_schemes AS ps
    ON ds.data_space_id = ps.data_space_id
    INNER JOIN sys.partition_functions AS pf
    ON ps.function_id = pf.function_id
    LEFT OUTER JOIN sys.partition_range_values AS prv
    ON pf.function_id = prv.function_id
    AND p.partition_number = prv.boundary_id
    LEFT OUTER JOIN sys.destination_data_spaces AS dds
    ON ps.data_space_id = dds.partition_scheme_id
    AND p.partition_number = dds.destination_id
    LEFT OUTER JOIN sys.data_spaces AS ds2
    ON dds.data_space_id = ds2.data_space_id
    ORDER BY
    DatabaseName
    ,SchemaName
    ,TableName
    ,IndexName
    ,PartitionNumber
    --=================================================================================
    -- SECTION 2 - SWITCH OUT
    -- 001 - Create TestTableOut
    -- 002 - Switch out partition in range 0-14
    -- 003 - Merge range 0 -29
    -- 001. TestTableOut
    :SETVAR TableName "TestTable"
    IF OBJECT_ID('dbo.$(TableName)Out') IS NOT NULL
    DROP TABLE [dbo].[$(TableName)Out]
    CREATE TABLE [dbo].[$(TableName)Out](
    [Partition_PK] [int] NOT NULL,
    [GUID_PK] [uniqueidentifier] NOT NULL,
    [CreateDate] [datetime] NULL,
    [CreateServer] [nvarchar](50) NULL,
    [RandomNbr] [int] NULL,
    CONSTRAINT [PK_$(TableName)Out] PRIMARY KEY CLUSTERED
    (
    [Partition_PK] ASC,
    [GUID_PK] ASC
    )
    ) ON $(TableName)_fg2;
    GO
    -- 002 - Switch out partition in range 0-14
    --:SETVAR TableName "TestTable"
    ALTER TABLE dbo.$(TableName)
    SWITCH PARTITION 2 TO dbo.$(TableName)Out;
    -- 003 - Merge range 0 - 29
    :SETVAR TableName "TestTable"
    ALTER PARTITION FUNCTION $(TableName)_func()
    MERGE RANGE (14);
    -- Confirm table partitioning
    -- Original source of this query - http://lextonr.wordpress.com/tag/sys-destination_data_spaces/
    SELECT
    N'DatabaseName' = DB_NAME()
    , N'SchemaName' = s.name
    , N'TableName' = o.name
    , N'IndexName' = i.name
    , N'IndexType' = i.type_desc
    , N'PartitionScheme' = ps.name
    , N'DataSpaceName' = ds.name
    , N'DataSpaceType' = ds.type_desc
    , N'PartitionFunction' = pf.name
    , N'PartitionNumber' = dds.destination_id
    , N'BoundaryValue' = prv.value
    , N'RightBoundary' = pf.boundary_value_on_right
    , N'PartitionFileGroup' = ds2.name
    , N'RowsOfData' = p.[rows]
    FROM
    sys.objects AS o
    INNER JOIN sys.schemas AS s
    ON o.[schema_id] = s.[schema_id]
    INNER JOIN sys.partitions AS p
    ON o.[object_id] = p.[object_id]
    INNER JOIN sys.indexes AS i
    ON p.[object_id] = i.[object_id]
    AND p.index_id = i.index_id
    INNER JOIN sys.data_spaces AS ds
    ON i.data_space_id = ds.data_space_id
    INNER JOIN sys.partition_schemes AS ps
    ON ds.data_space_id = ps.data_space_id
    INNER JOIN sys.partition_functions AS pf
    ON ps.function_id = pf.function_id
    LEFT OUTER JOIN sys.partition_range_values AS prv
    ON pf.function_id = prv.function_id
    AND p.partition_number = prv.boundary_id
    LEFT OUTER JOIN sys.destination_data_spaces AS dds
    ON ps.data_space_id = dds.partition_scheme_id
    AND p.partition_number = dds.destination_id
    LEFT OUTER JOIN sys.data_spaces AS ds2
    ON dds.data_space_id = ds2.data_space_id
    ORDER BY
    DatabaseName
    ,SchemaName
    ,TableName
    ,IndexName
    ,PartitionNumber
    The table below shows the results of the ‘Confirm Table Partitioning’ query, before and after the MERGE.
    The data in the File and File Group to be dropped (File Group 2) has already been switched out; File Group 3 contains the data so no index rebuild is needed to move data and complete the MERGE.
    RANGE RIGHT would not be a problem in a 'Sliding Window' if the same filegroup were used for all partitions; when partitions are created and dropped across multiple filegroups, it introduces a dependency on full index rebuilds. Larger tables are typically partitioned, and a full index rebuild might be an expensive operation. I'm not sure how a RANGE RIGHT partitioning strategy could be implemented, with an ascending partitioning key, using multiple filegroups without having to move data. Using a single filegroup (multiple files) for all partitions within a table would avoid physically moving data between filegroups; no index rebuild would be necessary to complete a MERGE, and the system views would accurately reflect the physical location of data.
    If a RANGE RIGHT partition function is used, the data is physically in the wrong filegroup after the MERGE (assuming a typical ascending partitioning key), and the 'Data Spaces' system views might be misleading. Thanks to Manuj and Chris for a lot of help investigating this.
    NOTE 10/03/2014 - The solution
    The solution is so easy it's embarrassing: I was using the wrong boundary points for the MERGE (both RANGE LEFT & RANGE RIGHT) to get rid of historic data.
    -- Wrong Boundary Point Range Right
    --ALTER PARTITION FUNCTION $(TableName)_func()
    --MERGE RANGE (15);
    -- Wrong Boundary Point Range Left
    --ALTER PARTITION FUNCTION $(TableName)_func()
    --MERGE RANGE (14);
    -- Correct Boundary Points for MERGE
    ALTER PARTITION FUNCTION $(TableName)_func()
    MERGE RANGE (0); -- or -1 for RANGE LEFT
    The empty, switched-out partition (on File Group 2) is then MERGED with the empty partition maintained at the start of the range, and no data movement is necessary. I retract the suggestion that a problem exists with RANGE RIGHT sliding windows using multiple filegroups, and apologize :-)

    Hi Paul Brewer,
    Thanks for your post, and glad to hear that the issue is resolved. It is kind of you to post a reply to share your solution; other community members can benefit from it.
    Regards,
    Sofiya Li
    TechNet Community Support

  • Unable to delete data from a partition

    Hi,
    Unable to delete data from a partition
    I am using the command:
    ALTER TABLE TEST DROP PARTITION DATE_20090820090000
    It's giving 0 rows deleted, but there are 200 rows in the partition.
    The partition is getting deleted but the data remains.
    I want a command where the data in the partition is also deleted along with the partition.
    Any help would be appreciated.

    SQL> CREATE TABLE range_part (
    prof_history_id NUMBER(10),
    person_id NUMBER(10) NOT NULL,
    organization_id NUMBER(10) NOT NULL,
    record_date DATE NOT NULL)
    PARTITION BY RANGE (record_date) (
    PARTITION yr0 VALUES LESS THAN (TO_DATE('01-JAN-2007','DD-MON-YYYY')),
    PARTITION yr7 VALUES LESS THAN (TO_DATE('01-JAN-2008','DD-MON-YYYY')),
    PARTITION yr8 VALUES LESS THAN (TO_DATE('01-JAN-2009','DD-MON-YYYY')),
    PARTITION yr9 VALUES LESS THAN (MAXVALUE) );
    Table created.
    SQL> INSERT INTO range_part VALUES (1, 1, 1, SYSDATE-720);
    INSERT INTO range_part VALUES (2, 2, 2, SYSDATE-360);
    INSERT INTO range_part VALUES (3, 3, 3, SYSDATE-180);
    INSERT INTO range_part VALUES (4, 4, 4, SYSDATE);
    1 row created.
    SQL>
    1 row created.
    SQL>
    1 row created.
    SQL>
    1 row created.
    SQL>
    SQL> commit;
    Commit complete.
    SQL> SELECT * FROM range_part;
    PROF_HISTORY_ID PERSON_ID ORGANIZATION_ID RECORD_DATE
    1 1 1 31-AUG-2007 05:53:22
    2 2 2 25-AUG-2008 05:53:22
    3 3 3 21-FEB-2009 05:53:22
    4 4 4 20-AUG-2009 05:53:22
    SQL> SELECT * FROM range_part PARTITION(yr7);
    PROF_HISTORY_ID PERSON_ID ORGANIZATION_ID RECORD_DATE
    1 1 1 31-AUG-2007 05:53:22
    SQL> alter table range_part drop partition yr7;
    Table altered.
    SQL> SELECT * FROM range_part PARTITION(yr7);
    SELECT * FROM range_part PARTITION(yr7)
    ERROR at line 1:
    ORA-02149: Specified partition does not exist
    SQL> SELECT * FROM range_part;
    PROF_HISTORY_ID PERSON_ID ORGANIZATION_ID RECORD_DATE
    2 2 2 25-AUG-2008 05:53:22
    3 3 3 21-FEB-2009 05:53:22
    4 4 4 20-AUG-2009 05:53:22

  • How to delete multiple empty lines in item assignment block in WebUI

    Hi Experts,
    When I read the 'BTAdminI' context node data, I get empty lines along with the data. I am able to delete only one empty line; when the loop reaches the second empty line, it fails. Can you suggest how I can delete multiple empty lines? I am using the code below.
    DATA: lr_entity1           TYPE REF TO cl_crm_bol_entity,
            lr_current          TYPE REF TO if_bol_bo_property_access,
            lr_iterator         TYPE REF TO if_bol_bo_col_iterator,
            lv_strukname        TYPE strukname,
            dref                TYPE REF TO data,
            lv_thtmlb_tableview TYPE REF TO cl_thtmlb_table_view,
            lr_cn               TYPE REF TO cl_bsp_wd_context_node_tv,
            cr_mixed_node_tv    TYPE REF TO cl_bsp_wd_mixed_node.
        FIELD-SYMBOLS: <fs_line_structure> TYPE data.
        lr_iterator = me->typed_context->btadmini->collection_wrapper->get_iterator( ).
        TRY.
            lr_current = lr_iterator->get_first( ).
          CATCH cx_root.
        ENDTRY.
        WHILE lr_current IS BOUND.
          lr_entity1 ?= lr_current.
          CHECK lr_entity1 IS BOUND.
          lr_entity1->get_attr_struct_name( RECEIVING rv_result = lv_strukname ).
          IF lv_strukname IS NOT INITIAL.
            CREATE DATA dref TYPE (lv_strukname).
            ASSIGN dref->* TO <fs_line_structure>.
            IF <fs_line_structure> IS ASSIGNED.
              lr_current->get_properties( IMPORTING es_attributes = <fs_line_structure> ).
              IF <fs_line_structure> IS INITIAL.
                me->typed_context->btadmini->collection_wrapper->remove( lr_current ).
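            " NOTE: this EXIT ends the WHILE loop after the first removal, so
            " later empty lines are never deleted; removing an entry from the
            " collection while iterating over it is what the reply below avoids.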
                EXIT.
              ENDIF.
            ENDIF.
          ENDIF.
          TRY.
              lr_current = lr_iterator->get_next( ).
            CATCH cx_root.
          ENDTRY.
        ENDWHILE.

    Hi Nitish,
    Try the code below instead of yours:
    Data:
    lt_empty_lines  TYPE TABLE OF REF TO cl_crm_bol_entity,
    lr_entity TYPE REF TO cl_crm_bol_entity.
    After getting the collection from BTAdminI, use the code below.
    lr_iterator ?= lr_coll->get_iterator( ).
        lr_entity ?= lr_iterator->get_first( ).
        WHILE lr_entity IS BOUND.
          IF lr_entity->is_send_active( ) EQ abap_false.
            APPEND lr_entity TO lt_empty_lines.
          ENDIF.
          lr_entity ?= lr_iterator->get_next( ).
        ENDWHILE.
        LOOP AT lt_empty_lines INTO lr_entity.
          typed_context->btadmini->collection_wrapper->remove( lr_entity ).
          lr_entity->delete( ).
        ENDLOOP.
    Best Regards,
    Dharmakasi.

  • Space reuse after deletion in a compressed table

    Hi,
    Some sources say that free space after a DELETE in a compressed table is not reused.
    For example, this: http://www.trivadis.com/uploads/tx_cabagdownloadarea/table_compression2_0411EN.pdf
    Is it true?
    Unfortunately, I cannot reproduce it.
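
    For what it's worth, a minimal sketch of the kind of test I mean (names hypothetical, matching the 10g experiment described below):

    CREATE TABLE t COMPRESS AS SELECT * FROM all_objects;        -- 1. CTAS compress
    DELETE FROM t WHERE MOD(object_id, 5) <> 0;                  -- 2. delete 4 of every 5 rows
    COMMIT;
    INSERT INTO t SELECT * FROM all_objects WHERE ROWNUM < 1000; -- 3. subsequent insert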

    Unfortunately, the question is still open.
    In Oracle 9i, space freed after a DELETE in a compressed block was not reused by subsequent inserts. Isn't that so?
    I saw much evidence from other people. One link I gave above.
    But in Oracle 10g I see different figures. After deleting rows in compressed blocks, and a subsequent insert into such a block, the block is defragmented!
    Please, if anyone knows of documentation about a change in this behavior, please post links.
    p.s.
    in 10g:
    1. CTAS compress. Block is full.
    2. after, deleted every 4 from 5 rows.
    avsp=0x3b
    tosp=0x99e
    0x24:pri[0]     offs=0xeb0
    0x26:pri[1]     offs=0xea8 -- deleted
    0x28:pri[2]     offs=0xea0 -- deleted
    0x2a:pri[3]     offs=0xe98 -- deleted
    0x2c:pri[4]     offs=0xe90 -- deleted
    0x2e:pri[5]     offs=0xe88 -- live
    0x30:pri[6]     offs=0xe80 -- deleted
    0x32:pri[7]     offs=0xe78 -- deleted
    0x34:pri[8]     offs=0xe70 -- deleted
    0x36:pri[9]     offs=0xe68 -- deleted
    0x38:pri[10]     offs=0xe60 -- live
    0x3a:pri[11]     offs=0xe58 -- deleted
    0x3c:pri[12]     offs=0xe50 -- deleted
    0x3e:pri[13]     offs=0xe48 -- deleted
    0x40:pri[14]     offs=0xe40 -- deleted
    0x42:pri[15]     offs=0xe38  -- live
    0x44:pri[16]     offs=0xe30 -- deleted
    0x46:pri[17]     offs=0xe28 -- deleted
    0x48:pri[18]     offs=0xe20 -- deleted
    0x4a:pri[19]     offs=0xe18 -- deleted
    0x4c:pri[20]     offs=0xe10 -- live
    3. insert into t select * from ... where rownum < 1000;
    The inserted rows went into several blocks. The total number of non-empty blocks did not change, and no row chaining occurred.
    The block above now looks as follows:
    avsp=0x7d
    tosp=0x7d
    0x24:pri[0]     offs=0xeb0
    0x26:pri[1]     offs=0x776 - new
    0x28:pri[2]     offs=0x84b - new
    0x2a:pri[3]     offs=0x920 - new
    0x2c:pri[4]     offs=0x9f5 - new
    0x2e:pri[5]     offs=0xea8 - old
    0x30:pri[6]     offs=0xaca - new
    0x32:pri[7]     offs=0xb9f - new
    0x34:pri[8]     offs=0x34d - new
    0x36:pri[9]     offs=0x422 - new
    0x38:pri[10]     offs=0xea0 - old
    0x3a:pri[11]     offs=0x4f7 - new
    0x3c:pri[12]     offs=0x5cc - new
    0x3e:pri[13]     offs=0x6a1 - new
    0x40:pri[14]     sfll=16  
    0x42:pri[15]     offs=0xe98 - old
    0x44:pri[16]     sfll=17
    0x46:pri[17]     sfll=18
    0x48:pri[18]     sfll=19
    0x4a:pri[19]     sfll=21
    0x4c:pri[20]     offs=0xe90 -- old
    0x4e:pri[21]     sfll=22
    0x50:pri[22]     sfll=23
    0x52:pri[23]     sfll=24
    0x54:pri[24]     sfll=26
    As we can see, the old rows were defragmented, repacked, and moved to the bottom of the block.
    New rows (inserted after the table was compressed) fill the remaining space.
    So the deleted space was reused.
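    For anyone who wants to repeat the experiment, the test above can be scripted roughly as follows. This is a sketch only: ALL_OBJECTS stands in for the original data, and the datafile/block numbers in the final DUMP command are placeholders to be replaced with the values returned by the ROWID query.
    -- 1. CTAS with compression; fills the blocks completely.
    CREATE TABLE t COMPRESS AS
    SELECT * FROM all_objects WHERE ROWNUM <= 10000;
    -- 2. Delete roughly 4 out of every 5 rows.
    DELETE FROM t WHERE MOD(object_id, 5) <> 0;
    COMMIT;
    -- 3. Note a block to inspect (dump it now for the "before" picture).
    SELECT DBMS_ROWID.ROWID_RELATIVE_FNO(ROWID) AS file_no,
           DBMS_ROWID.ROWID_BLOCK_NUMBER(ROWID) AS block_no
      FROM t WHERE ROWNUM = 1;
    -- 4. Reinsert rows, then dump the same block again and compare.
    INSERT INTO t SELECT * FROM all_objects WHERE ROWNUM < 1000;
    COMMIT;
    ALTER SYSTEM DUMP DATAFILE 5 BLOCK 1234;  -- substitute file_no / block_no from step 3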

  • Long running table partitioning job

    Dear HANA gurus,
    I've just finished table partitioning jobs for CDPOS (change document items), using 4 partitions hashed over 3 columns.
    Total data volume is around 340GB and the table size was 32GB !!!!!
    (The migration job was done without disabling CD, so I am currently deleting data from the table with RSCDOK99.)
    Before partitioning, the data volume of the table was around 32GB.
    After partitioning, the size has changed to 25GB.
    It took around one and a half hours with an exclusive lock, as mentioned in the HANA administration guide.
    (It is a QA DB, so fewer complaints.)
    I suspect I will not be able to do this in the production DB.
    Does anyone have any ideas for accelerating this task?? (This is the fastest DBMS, HANA!!!!)
    Or do you have any plans for online table partitioning functionality?? (To the HANA development team)
    Any comments would be appreciated.
    Cheers,
    - Jason

    Jason,
    looks like we're cross talking here...
    What was your rationale to partition the table in the first place?
           => To reduce the deletion time for CDPOS (as I mentioned, it was almost 10% of the whole data volume, so I wanted to cut the deletion time via one of the benefits of partitioning, such as partition pruning)
    Ok, I see where you're coming from, but did you ever try out whether your idea would actually work?
    As deleting data is largely a matter of locating the records to be deleted, creating an index would probably have been the better choice.
    Thinking about it... you want to get rid of 10% of your data, and in order to speed the overall process up, you decide to move 100% of the data into four sets of 25% of the data - each equally holding its 25% share of the 10% of records to be deleted.
    The deletion then has to run along these 4 sets of 25% of data.
    It's surely me, but where is the speedup potential here?
    How many unloads happened during the re-partitioning?
           => The table was fully loaded into memory before I partitioned it (from HANA studio).
    I was actually asking about unloads _during_ the re-partitioning process. Check M_CS_UNLOADS for the time frame in question.
    How do the now longer running SQL statements look like?
           => As I mentioned, selecting/deleting took almost twice as long.
    That's not what I asked.
    Post the SQL statement text that was taking longer.
    What are the three columns you picked for partitioning?
           => MANDANT, OBJECTCLAS, TABNAME (QA has 2 clients, and each of them holds nearly the same number of rows of the table)
    Why those? Because these are the primary key?
    I wouldn't be surprised if the SQL statements only refer to e.g. MANDT and TABNAME in the WHERE clause.
    In that case the partition pruning cannot work and all partitions have to be searched.
    How did you come up with 4 partitions? Why not 13, 72 or 213?
           => I thought each partition's size would be 8GB (32GB/4) if they were divided equally (just a simple thought), and 8GB is about the same size as the other largest top-20 tables in the HANA DB.
    Alright, so basically that was arbitrary.
    For the last comment of your reply: most people partition their existing large tables to get some benefit out of partitioning (just like me). I think your comment applies to newly inserted data.
    Well, I'm not sure what "most people" would do.
    HASH partitioning a large existing table certainly is not an activity that is just triggered off in a production system. Adding partitions to a range-partitioned table, however, happens all the time.
    - Lars
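    To make the pruning point concrete, here is a sketch in HANA SQL. Only the table and column names come from the thread; the DDL and the literal values are illustrative assumptions. Hash pruning can only kick in when the WHERE clause binds all partitioning columns.
    -- Hash-partition CDPOS over the three columns from the thread, into 4 parts.
    ALTER TABLE CDPOS PARTITION BY HASH (MANDANT, OBJECTCLAS, TABNAME) PARTITIONS 4;
    -- Can prune: all three hash columns are bound, so the target partition is computable.
    SELECT * FROM CDPOS
     WHERE MANDANT = '100' AND OBJECTCLAS = 'EINKBELEG' AND TABNAME = 'EKPO';
    -- Cannot prune: OBJECTCLAS is missing, so every partition has to be searched.
    SELECT * FROM CDPOS
     WHERE MANDANT = '100' AND TABNAME = 'EKPO';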

  • Button to delete all empty rows

    I am in the process of creating an interactive form with a button that adds rows for the user to input issues, and another button that adds follow-ups to those instances. There are 3 different sets of these. However, we do not want to add a delete button to every row, to avoid accidental deletion of already-entered data. I would like to create a button that will delete all empty rows, including all subforms. Either that, or upon saving the document, all empty rows would be deleted - whichever would be easier. Please help! This seems like it will be a complicated task, and I am not well enough versed in LiveCycle to figure this out on my own.
    Thank you!

    There is no doubt that looping through nested repeating objects is more complex:
    Here is the script for the first table with follow-up rows:
    // Technical
    var nCount3 = VitalsSubform.Technical._Instance1.count;
    for (var k = 0; k < nCount3; k++)
    {
         // this is the script to remove the follow-up rows in the first table only
         var nCount6 = xfa.resolveNode("VitalsSubform.Technical.Instance1[" + k + "]").Table1._Row1.count;
         // loop through the rows in the follow-up table
         for (var i = 0; i < nCount6; i++)
         {
              if (xfa.resolveNode("VitalsSubform.Technical.Instance1[" + k + "].Table1.Row1[" + i + "]").Cell4.rawValue == null)
              {
                   // remove the empty row
                   xfa.resolveNode("VitalsSubform.Technical.Instance1[" + k + "]").Table1._Row1.removeInstance(i);
                   // recount nCount6 because a row has been deleted
                   nCount6 = xfa.resolveNode("VitalsSubform.Technical.Instance1[" + k + "]").Table1._Row1.count;
                   // step back to account for the row that was just deleted
                   i = i - 1;
              }
         }
         // now remove empty statement rows
         if (xfa.resolveNode("VitalsSubform.Technical.Instance1[" + k + "]").Table1.Statement.Statement.rawValue == null)
         {
              VitalsSubform.Technical._Instance1.removeInstance(k);
              nCount3 = VitalsSubform.Technical._Instance1.count;
              k = k - 1;
         }
    }
    It is by no means tested and could still be flaky.
    Have you considered a manual 'x'/delete button for the follow-up rows?
    Here is the form: https://acrobat.com/#d=JMGUKXNQ*qMD18S3W5VxSQ
    Niall

  • Delete from internal table

    Hi,
    I want to delete some records from an internal table.
    I wrote this code:
    delete isrot where bldat < '01.09.2005'.
    It doesn't work. What is wrong?
    regards,
    Joanna

    hi,
    you write the statement like this:
    <b>DELETE FROM isrot WHERE bldat < '20050901'.</b>
    Note that a date field is compared in the internal format YYYYMMDD, so '01.09.2005' will never match; use '20050901'. Also, DELETE FROM ... WHERE ... works on the database table - if isrot is an internal table, write DELETE isrot WHERE bldat < '20050901'. without FROM.
    To select the lines that you want to delete using a condition, use the following:
    <b>DELETE FROM <target> WHERE <cond>.</b>
    All of the lines in the database table that satisfy the conditions in the WHERE clause are deleted. The FROM expression must occur between the keyword and the database table.
    You should take particular care when programming the WHERE clause to ensure that you do not delete the wrong lines. For example, if you specify an empty internal table in a dynamic WHERE clause, all of the lines in the table are deleted.
    If at least one line is deleted, the system sets SY-SUBRC to 0, otherwise to 4. SY-DBCNT contains the number of lines deleted.
    Follow this link for more information on internal table operations:
    http://help.sap.com/saphelp_nw04/helpdata/en/fc/eb3aef358411d1829f0000e829fbfe/content.htm
    regards,
    Ashok Reddy

  • Table partitioning (interval partitioning) on existing tables in Oracle 11g

    Hi, I'm a newbie to table partitioning. I'm using 11g. I have a table of 32 GB (22 million records) and I want to apply interval partitioning to it. I created an empty interval-partitioned table with the same columns as the source table, took a dump of the source table, and want to import it into the new partitioned table. Can you please suggest how to import the table dump into the new table? Also, is there a better way to do the same?

    Hi,
    imp user/password file=exp.dmp ignore=y
    The ignore=y causes the import to skip the table creation and continue to load all rows.
    On the other hand, you can insert the data into the partitioned table with a subquery from the non-partitioned table, as follows:
    insert into partitioned_table
    select * from original_table;
    Hope it helps,
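    If the goal is simply to move the 22 million rows into an interval-partitioned copy, the whole thing can be done in SQL without exp/imp. A sketch, assuming a DATE column drives the partitioning; all names here are made up for illustration:
    -- 11g interval partitioning: one seed partition, monthly partitions created on demand.
    CREATE TABLE orders_part (
      order_id    NUMBER,
      order_date  DATE NOT NULL
    )
    PARTITION BY RANGE (order_date)
    INTERVAL (NUMTOYMINTERVAL(1, 'MONTH'))
    ( PARTITION p_initial VALUES LESS THAN (DATE '2011-01-01') );
    -- Direct-path load from the existing table.
    INSERT /*+ APPEND */ INTO orders_part
    SELECT order_id, order_date FROM orders;
    COMMIT;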

  • Unable to export empty tables

    I use this command:
    exp userid=clix_db file=clix.dmp log=dump.log consistent=y
    to export all tables from user "clix_db".
    But when I take a look into the log file, I notice that not all tables have been exported. All empty tables are missing.
    E.g. the table "LIC_REPORTLOG" does exist! OK, the table is empty.
    But it is not exported. When I import clix.dmp into another empty database, there is no table "LIC_REPORTLOG".
    When I export only this table with
    exp userid=clix_db file=clix_lic.dmp log=dump_lic.log consistent=y tables=LIC_REPORTLOG
    I get an error message:
    EXP-00011: CLIX_DB.LIC_REPORTLOG does not exist
    If I take a look into the table "all_tables", everything is fine:
    Enter user-name: clix_db
    Enter password:
    Connected to: Oracle Database 11g Enterprise Edition Release 11.2.0.1.0 - Production
    With the Partitioning, OLAP, Data Mining and Real Application Testing options
    SQL> select table_name from all_tables where table_name='LIC_REPORTLOG';
    TABLE_NAME
    LIC_REPORTLOG
    SQL> select * from CLIX_DB.LIC_REPORTLOG;
    no rows selected
    Any ideas?

    Hello,
    I think that with such a new release you should use Data Pump (expdp/impdp) to export tables.
    For exporting a complete schema you may use the following syntax:
    expdp user/password PARFILE=pfexport.txt
    With pfexport.txt as below:
    SCHEMAS=schema_name
    FLASHBACK_TIME="TO_TIMESTAMP(to_char(SYSDATE,'DD-MM-YYYY HH24:MI:SS'),'DD-MM-YYYY HH24:MI:SS')"
    CONTENT=ALL
    DIRECTORY=EXP_DIR
    DUMPFILE=dump_file_name
    LOGFILE=log_file_name
    Then, in this example, you'll get the dump file and the log file in the EXP_DIR Oracle directory (if it exists).
    You can check your Oracle directories with the following query:
    select * from dba_directories;
    Then, you can use one of these directories or create a new one with the following statements:
    CREATE OR REPLACE DIRECTORY directory_name AS 'directory_path';
    GRANT READ, WRITE ON DIRECTORY directory_name TO user_name;
    Hope it can help,
    Best regards,
    Jean-Valentin
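    For what it's worth, the likely cause here (my reading, not stated in the thread) is 11.2's deferred segment creation: an empty table gets no segment until its first row arrives, and the original exp utility only exports tables that own a segment. Data Pump does not have this limitation; alternatively, the empty tables can be given a segment first:
    -- Force a segment for the empty table so that classic exp can see it.
    ALTER TABLE LIC_REPORTLOG ALLOCATE EXTENT;
    -- Or turn the behavior off for tables created from now on.
    ALTER SYSTEM SET deferred_segment_creation = FALSE;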
