Create table interval partition on a column timestamp with local time zone

Hi,
Does anyone have an example for 11g of how to create a table with interval partitioning on a column defined as TIMESTAMP WITH LOCAL TIME ZONE? I know it's possible, but the following does not work:
CREATE TABLE KOMODO_EXPIRED_RESULTS
(
TEST_EVENT_KEY NUMBER NOT NULL,
HPS_DEVICE_KEY NUMBER NOT NULL,
RCS_DEVICE_KEY NUMBER,
EVENT_START_TIMESTAMP TIMESTAMP(6) with local time zone NOT NULL,
BOOTROMVERSION NUMBER,
CHANNELNUMBER NUMBER,
CLIENTVERSION VARCHAR2(4000 BYTE),
ETHERNET_CRC_ERROR_COUNT NUMBER,
ETHERNET_DROPPED_PACKETS NUMBER,
ETHERNET_THROUGHPUT NUMBER,
ETHERNET_TRAFFIC_IN NUMBER,
ETHERNET_TRAFFIC_OUT NUMBER,
IPADDRESS VARCHAR2(4000 BYTE),
KOMODO_ID VARCHAR2(4000 BYTE),
LASTREBOOTTIME VARCHAR2(4000 BYTE),
OSVERSION VARCHAR2(4000 BYTE),
RECEIVER_AUDIOACCESSCONTROLER NUMBER,
RECEIVER_AUDIOBUFFEROVERFLOWS NUMBER,
RECEIVER_AUDIOBUFFERUNDERRUNS NUMBER,
RECEIVER_AUDIOCODEC VARCHAR2(4000 BYTE),
RECEIVER_AUDIODATADROPPED NUMBER,
RECEIVER_AUDIODATATHROUGHPUT NUMBER,
RECEIVER_AUDIODECODERERRORS NUMBER,
RECEIVER_AUDIODESCBUFFERUNDER NUMBER,
RECEIVER_AUDIODESCCRYPTOERROR NUMBER,
RECEIVER_AUDIODESCDATADROPPED NUMBER,
RECEIVER_AUDIODESCDATATHROUGH NUMBER,
RECEIVER_AUDIODESCDECODERERRO NUMBER,
RECEIVER_AUDIODESCDRMERRORS NUMBER,
RECEIVER_AUDIODESCPTSDELTA NUMBER,
RECEIVER_AUDIODESCPTSDELTAHAL NUMBER,
RECEIVER_AUDIODESCSAMPLESDROP NUMBER,
RECEIVER_AUDIODSPCRASHES VARCHAR2(4000 BYTE),
RECEIVER_AUDIOPTSDELTAHAL NUMBER,
RECEIVER_AUDIOSAMPLESDECODED NUMBER,
RECEIVER_AUDIOSAMPLESDROPPED NUMBER,
RECEIVER_AUDIOUNDERRUN NUMBER,
RECEIVER_BITRATE NUMBER,
RECEIVER_BUFFEROVERRUN NUMBER,
RECEIVER_BYTESCCRECEIVED NUMBER,
RECEIVER_BYTESRECEIVED NUMBER,
RECEIVER_CHANNEL NUMBER,
RECEIVER_DECODERSTALL NUMBER,
RECEIVER_DISCONTINUITIES NUMBER,
RECEIVER_DISCONTINUITIESPACKE NUMBER,
RECEIVER_DRIFT NUMBER,
RECEIVER_DROPPEDPACKETSUNTILR NUMBER,
RECEIVER_ECMLOOKUPERROR NUMBER,
RECEIVER_ECMPARSEERRORS NUMBER,
RECEIVER_PMTCHANGED NUMBER,
RECEIVER_REBUFFER NUMBER,
RECEIVER_SELECTCOMPONENTAUDIO NUMBER,
RECEIVER_TIMELINEDISCONTINUIT NUMBER,
RECEIVER_VIDEOACCESSCONTROLER NUMBER,
RECEIVER_VIDEOACCESSCONTROLUN NUMBER,
RECEIVER_VIDEOBUFFEROVERFLOWS NUMBER,
RECEIVER_VIDEOBUFFERUNDERRUNS NUMBER,
RECEIVER_VIDEOCODEC VARCHAR2(4000 BYTE),
RECEIVER_VIDEOCRYPTOERROR NUMBER,
RECEIVER_VIDEODATADROPPED NUMBER,
RECEIVER_VIDEODATATHROUGHPUT NUMBER,
RECEIVER_VIDEODECODERERRORS NUMBER,
RECEIVER_VIDEODRMERRORS NUMBER,
RECEIVER_VIDEODSPCRASHES VARCHAR2(4000 BYTE),
RECEIVER_VIDEOFIFORD NUMBER,
RECEIVER_VIDEOFIFOSIZE NUMBER,
RECEIVER_VIDEOFRAMESDECODED NUMBER,
RECEIVER_VIDEOFRAMESDROPPED NUMBER,
RECEIVER_VIDEOPTSDELTA NUMBER,
RECEIVER_VIDEOPTSDELTAHAL NUMBER,
RECEIVER_VIDEOUNDERRUN NUMBER,
SUBNETMASK VARCHAR2(4000 BYTE),
TUNER_BITRATE NUMBER,
TUNER_BUFFERFAILURE NUMBER,
TUNER_CCPACKETSRECEIVED NUMBER,
TUNER_CHANNEL NUMBER,
TUNER_DATATIMEOUTS NUMBER,
TUNER_DELIVERYMODE VARCHAR2(4000 BYTE),
TUNER_DROPPAST NUMBER,
TUNER_FILL NUMBER,
TUNER_HOLE NUMBER,
TUNER_HOLEDURINGBURST NUMBER,
TUNER_HOLEDURINGBURSTPACKETS NUMBER,
TUNER_HOLETOOLARGEPACKETS NUMBER,
TUNER_MAXIMUMHOLESIZE NUMBER,
TUNER_MULTICASTADDRESS VARCHAR2(4000 BYTE),
TUNER_MULTICASTJOINDELAY NUMBER,
TUNER_OUTOFORDER NUMBER,
TUNER_OVERFLOWRESET NUMBER,
TUNER_OVERFLOWRESETTIMES NUMBER,
TUNER_PACKETSEXPIRED NUMBER,
TUNER_PACKETSPROCESSED NUMBER,
TUNER_PACKETSRECEIVED NUMBER,
TUNER_PACKETSWITHOUTSESSION NUMBER,
TUNER_PARSEERRORS NUMBER,
TUNER_SRCUNAVAILABLERECEIVED NUMBER,
TUNER_TOTALHOLEPACKETS NUMBER,
TUNER_TOTALPACKETSEXPIRED NUMBER,
TUNER_TOTALPACKETSRECEIVED NUMBER,
TUNER_UNICASTADDRESS VARCHAR2(4000 BYTE),
RECEIVER_TUNEDFOR NUMBER,
MACADDRESS VARCHAR2(4000 BYTE),
RECEIVER_TOTALAVUNDERRUNS NUMBER,
RECEIVER_TOTALDISCONTINUITIES NUMBER,
SERVICEID VARCHAR2(4000 BYTE),
DRIVEPRESENT VARCHAR2(4000 BYTE),
STB_STATE VARCHAR2(32 BYTE),
PREV_EXPIRED NUMBER,
PREV_HOLES NUMBER,
PREV_RECEIVED NUMBER,
PREV_TIMESTAMP TIMESTAMP(6),
PREV_REBOOT VARCHAR2(4000 BYTE),
TOTALPACKETSEXPIRED_RATE NUMBER,
TOTALHOLEPACKETS_RATE NUMBER,
TOTALPACKETSRECEIVED_RATE NUMBER,
CONSTRAINT KOMODO_EXPIRED_RESULTS_PK
PRIMARY KEY
(HPS_DEVICE_KEY, EVENT_START_TIMESTAMP)
USING INDEX
TABLESPACE HPS_SUMMARY_INDEX
)
TABLESPACE HPS_SUMMARY_DATA
PARTITION BY RANGE (EVENT_START_TIMESTAMP)
INTERVAL( NUMTODSINTERVAL(1,'DAY'))
( PARTITION DEFAULT_TIME_PART_01 VALUES LESS THAN (TIMESTAMP '2010-08-01 00:00:00.000000000 +00:00') )
LOGGING
COMPRESS FOR ALL OPERATIONS
TABLESPACE HPS_SUMMARY_DATA
NOCACHE
PARALLEL ( DEGREE DEFAULT INSTANCES DEFAULT )
MONITORING
/

I am not sure it can be done.
SQL> create table sales
  2  (
  3  sales_id number,
  4  sales_dt TIMESTAMP(6) with local time zone NOT NULL
  5  )
  6  partition by range (sales_dt)
  7  interval (numtoyminterval(1,'MONTH'))
  8  ( partition p0901 values less than (to_date('2009-02-01','yyyy-mm-dd')) );
create table sales
ERROR at line 1:
ORA-14751: Invalid data type for partitioning column of an interval partitioned
table
SQL> ed
Wrote file afiedt.buf
  1  create table sales
  2  (
  3  sales_id number,
  4  sales_dt TIMESTAMP(6)
  5  )
  6  partition by range (sales_dt)
  7  interval (numtoyminterval(1,'MONTH'))
  8* ( partition p0901 values less than (to_date('2009-02-01','yyyy-mm-dd')) )
SQL> /
Table created.
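
If the local-time-zone semantics are not needed on the partitioning key itself, one possible workaround (a sketch only, not tested against the full schema above) is to declare the column as a plain TIMESTAMP - for example normalized to UTC on insert - since the interval clause does accept TIMESTAMP, as the test above shows:

CREATE TABLE KOMODO_EXPIRED_RESULTS
( TEST_EVENT_KEY        NUMBER       NOT NULL,
  HPS_DEVICE_KEY        NUMBER       NOT NULL,
  EVENT_START_TIMESTAMP TIMESTAMP(6) NOT NULL,   -- plain TIMESTAMP, e.g. stored as UTC
  -- ... remaining columns as in the original DDL ...
  CONSTRAINT KOMODO_EXPIRED_RESULTS_PK
    PRIMARY KEY (HPS_DEVICE_KEY, EVENT_START_TIMESTAMP)
    USING INDEX TABLESPACE HPS_SUMMARY_INDEX
)
TABLESPACE HPS_SUMMARY_DATA
PARTITION BY RANGE (EVENT_START_TIMESTAMP)
INTERVAL (NUMTODSINTERVAL(1,'DAY'))
( PARTITION DEFAULT_TIME_PART_01
    VALUES LESS THAN (TIMESTAMP '2010-08-01 00:00:00') );

Any time-zone conversion for display can then be done on read (for example with FROM_TZ over the stored UTC value).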

Similar Messages

  • When I create a new event in my calendar in my local time zone, my invitees receive it in GMT. Why, and how can this be fixed?

    When I create a new event in my calendar in my local time zone, my invitees receive it in GMT. Why, and how can this be fixed?

    Greetings jsubacus,
    Welcome to the Apple Support Communities!
    I understand that your calendar events are appearing in GMT on your calendar in Yosemite. A fix for this was included in iOS 8.2 for iCloud calendars. I would suggest that you make sure all of your devices are updated to the current software release to help resolve the issue.
    iOS 8.2
     Fixes a timezone issue where Calendar events appear in GMT
    Cheers,
    Joe

  • Converting timestamp from local time to UTC

    Is there a smooth way to convert a timestamp containing local time to UTC time?
    The statement:
    CONVERT TIME STAMP time_stamp TIME ZONE tz
    INTO [DATE dat] [TIME tim]
    [DAYLIGHT SAVING TIME dst].
    This considers time_stamp to be UTC time and then tries to convert it to the time zone specified in tz. What I would like to do is the opposite: have time_stamp represent the local time and then convert it back to UTC.
    TIA!
    /Armin

    Hi Armin.
    Just turn your statement around and you will be fine:
    DATA: date TYPE sydatum VALUE '20070525',
          time TYPE syuzeit VALUE '173030',
          cet  TYPE tzonref-tzone VALUE 'CET',
          utc  TYPE tzonref-tzone VALUE 'UTC',
          tstp TYPE timestamp.
    CONVERT DATE date TIME time INTO TIME STAMP tstp TIME ZONE cet .
    The output will be 15:30:30 on the 25th of May 2007.
    I used CET as an example; just use the time zone your date is representing. The result is formatted as UTC.
    Actually, if you only have the timestamp, for this solution you have to convert it into date/time first.
    Use
    CONVERT TIME STAMP tstp TIME ZONE utc INTO DATE date TIME time.
    to do so.
    Regards,
    Timo.

  • Create table allows fancy schemas in columns based on user defined types

    I just found out that in XE one can create a table with the following definition:
    CREATE TABLE "TEST"."TEST" (
      "ID" NUMBER,
      "LINK" "ABC"."URITYPE",
      CONSTRAINT "PK_EMP" PRIMARY KEY ("ID")
       );
    even if schema "ABC" doesn't exist.
    Likewise you can specify an existing schema on which you don't hold any privilege.
    CREATE TABLE "TEST"."TEST" (
      "ID" NUMBER,
      "LINK" "SCOTT"."URITYPE",
      CONSTRAINT "PK_EMP" PRIMARY KEY ("ID")
       );
    If you get the DDL using SQLDeveloper, in the former case you'll get the column without the schema; in the latter case, it will retain the schema specification.
    I discovered this because SQLDeveloper 2.1.1.64 seems to add the current schema to certain objects like XMLTYPE and URITYPE, even if their schema should be SYS.
    I imagine that there must some problem at the parser level but also in DBMS_METADATA.
    Flavio
    http://oraclequirks.blogspot.com

  • Create table as select (CTAS) statement is taking a very long time.

    Hi All,
    One of my procedures runs a create table as select statement every month.
    Usually it finishes in 20 minutes for 6,172,063 records, and in 1 hour for 13,699,067.
    But this time it is taking forever, even for 38,076 records.
    When I checked, all it is doing is CPU usage. No I/O.
    I did a count(*) using the query and it brought back results fine,
    but the CTAS keeps going.
    I'm using Oracle 10.2.0.4.
    The main table temp_ip has 38,076 records,
    table nhs_opcs_hier has 26,769 records,
    and table nhs_icd10_hier has 49,551 records.
    Query is as follows:
    create table analytic_hes.temp_ip_hier as
    select b.*, (select nvl(max(hierarchy), 0)
    from ref_hd.nhs_opcs_hier a
    where fiscal_year = b.hd_spell_fiscal_year
    and a.code in
    (primary_PROCEDURE, secondary_procedure_1, secondary_procedure_2,
    secondary_procedure_3, secondary_procedure_4, secondary_procedure_5,
    secondary_procedure_6, secondary_procedure_7, secondary_procedure_8,
    secondary_procedure_9, secondary_procedure_10,
    secondary_procedure_11, secondary_procedure_12)) as hd_procedure_hierarchy,
    (select nvl(max(hierarchy), 0) from ref_hd.nhs_icd10_hier a
    where fiscal_year = b.hd_spell_fiscal_year
    and a.code in
    (primary_diagnosis, secondary_diagnosis_1,
    secondary_diagnosis_2, secondary_diagnosis_3,
    secondary_diagnosis_4, secondary_diagnosis_5,
    secondary_diagnosis_6, secondary_diagnosis_7,
    secondary_diagnosis_8, secondary_diagnosis_9,
    secondary_diagnosis_10, secondary_diagnosis_11,
    secondary_diagnosis_12, secondary_diagnosis_13,
    secondary_diagnosis_14)) as hd_diagnosis_hierarchy
    from analytic_hes.temp_ip b
    Any help would be greatly appreciated

    Hello
    This is a bit of a wild card, I think, because it's going to require 14 full scans of the temp_ip table to unpivot the diagnosis and procedure codes, so it's likely this will run slower than the original. However, as this is a temporary table, I'm guessing you might have some control over its structure, or at least have the ability to scrap it and try something else. If you are able to alter this table structure, you could make the query much simpler and most likely much quicker. I think you need a list of procedure codes for the fiscal year and a list of diagnosis codes for the fiscal year. I'm doing that through the big list of UNION ALL statements, but you may have a more efficient way to do it based on the core tables you're populating temp_ip from. Anyway, here it is (as far as I can tell this will do the same job):
    WITH codes AS
    (   SELECT
            bd.primary_key_column_s,
            hd_spell_fiscal_year,
            primary_PROCEDURE       procedure_code,
            primary_diagnosis       diagnosis_code
        FROM
            temp_ip
        UNION ALL
        SELECT
            bd.primary_key_column_s,
            hd_spell_fiscal_year,
            secondary_procedure_1    procedure_code,
            secondary_diagnosis_1    diagnosis_code
        FROM
            temp_ip
        UNION ALL
        SELECT
            bd.primary_key_column_s,
            hd_spell_fiscal_year,
            secondary_procedure_2    procedure_code ,
            secondary_diagnosis_2    diagnosis_code     
        FROM
            temp_ip
        UNION ALL
        SELECT
            bd.primary_key_column_s,
            hd_spell_fiscal_year,
            secondary_procedure_3    procedure_code,
            secondary_diagnosis_3    diagnosis_code
        FROM
            temp_ip
        UNION ALL
        SELECT
            bd.primary_key_column_s,
            hd_spell_fiscal_year,
            secondary_procedure_4    procedure_code,
            secondary_diagnosis_4    diagnosis_code
        FROM
            temp_ip
        UNION ALL
        SELECT
            bd.primary_key_column_s,
            hd_spell_fiscal_year,
            secondary_procedure_5    procedure_code,
            secondary_diagnosis_5    diagnosis_code
        FROM
            temp_ip
        UNION ALL
        SELECT
            bd.primary_key_column_s,
            hd_spell_fiscal_year,
            secondary_procedure_6    procedure_code,
            secondary_diagnosis_6    diagnosis_code
        FROM
            temp_ip
        UNION ALL
        SELECT
            bd.primary_key_column_s,
            hd_spell_fiscal_year,
            secondary_procedure_7    procedure_code,
            secondary_diagnosis_7    diagnosis_code
        FROM
            temp_ip
        UNION ALL
        SELECT
            bd.primary_key_column_s,
            hd_spell_fiscal_year,
            secondary_procedure_8    procedure_code,
            secondary_diagnosis_8    diagnosis_code
        FROM
            temp_ip
        UNION ALL
        SELECT
            bd.primary_key_column_s,
            hd_spell_fiscal_year,
            secondary_procedure_9    procedure_code,
            secondary_diagnosis_9    diagnosis_code
        FROM
            temp_ip
        UNION ALL
        SELECT
            bd.primary_key_column_s,
            hd_spell_fiscal_year,
            secondary_procedure_10  procedure_code,
            secondary_diagnosis_10    diagnosis_code
        FROM
            temp_ip
        UNION ALL
        SELECT
            bd.primary_key_column_s,
            hd_spell_fiscal_year,
            secondary_procedure_11  procedure_code,
            secondary_diagnosis_11    diagnosis_code
        FROM
            temp_ip
        UNION ALL
        SELECT
            bd.primary_key_column_s,
            hd_spell_fiscal_year,
            secondary_procedure_12  procedure_code,
            secondary_diagnosis_12    diagnosis_code
        FROM
            temp_ip
    ), hd_procedure_hierarchy AS
    (   SELECT
            NVL (MAX (a.hierarchy), 0) hd_procedure_hierarchy,
            a.fiscal_year
        FROM
            ref_hd.nhs_opcs_hier a,
            codes pc
        WHERE
            a.fiscal_year = pc.hd_spell_fiscal_year
        AND
            a.code = pc.procedure_code
        GROUP BY
            a.fiscal_year
    ),hd_diagnosis_hierarchy AS
    (   SELECT
            NVL (MAX (a.hierarchy), 0) hd_diagnosis_hierarchy,
            a.fiscal_year
        FROM
            ref_hd.nhs_icd10_hier a,
            codes pc
        WHERE
            a.fiscal_year = pc.hd_spell_fiscal_year
        AND
            a.code = pc.diagnosis_code
        GROUP BY
            a.fiscal_year
    )
    SELECT b.*, a.hd_procedure_hierarchy, c.hd_diagnosis_hierarchy
      FROM analytic_hes.temp_ip b
           LEFT OUTER JOIN hd_procedure_hierarchy a
              ON (a.fiscal_year = b.hd_spell_fiscal_year)
           LEFT OUTER JOIN hd_diagnosis_hierarchy c
               ON (c.fiscal_year = b.hd_spell_fiscal_year)
    HTH
    David

  • I think the official Microsoft documentation regarding creating a table is wrong; I want to confirm this with others

    Hi guys,
    I think the CREATE TABLE syntax on the official Microsoft website is wrong, but I really cannot believe that they made a mistake in such basic and popular syntax. So I need to confirm this with you guys.
    The syntax from Microsoft is as follows:
    CREATE TABLE
    [ database_name . [ schema_name ] . | schema_name . ] table_name
    [ AS FileTable ]
    ( { <column_definition> | <computed_column_definition>
    | <column_set_definition> | [ <table_constraint> ] [ ,...n ] } )
    [ ON { partition_scheme_name ( partition_column_name ) | filegroup
    | "default" } ]
    [ TEXTIMAGE_ON { filegroup | "default" } ]
    [ FILESTREAM_ON { partition_scheme_name | filegroup
    | "default" } ]
    [ WITH ( <table_option> [ ,...n ] ) ]
    The part I am suspicious of is:
    CREATE TABLE
    [ database_name . [ schema_name ] . | schema_name . ] table_name
    The way I understand this statement is that you can do any one of the things below, and they should all be correct:
    1. just specify table_name
    2. specify schema_name and table_name
    3. specify database_name, schema_name and table_name
    4. specify database_name and table_name
    Based on my test, number four is incorrect, which confuses me.
    Can anyone correct me if I am wrong? Thanks!

    You can exclude the schema name, but you still need to put the dot,
    so it's either
    database.schemaname.
    or
    database..
    before the table name.
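    As a quick, hypothetical check of that behavior (names made up here; this is only a sketch, not from the original thread):
    CREATE DATABASE demo_db;
    GO
    CREATE TABLE demo_db..t1 (id int);      -- works: schema omitted but the dot kept; defaults to dbo
    CREATE TABLE demo_db.dbo.t2 (id int);   -- works: database.schema.table
    -- CREATE TABLE demo_db.t3 (id int);    -- parsed as schema.table, so it fails unless a schema named demo_db exists
    GO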
    Visakh

  • Partitioning Hard Drive for Use with Mac (Time Machine) and Windows

    I have a brand new iMac on which I am also running Windows Vista through Boot Camp (sweet, by the way). I have a large external hard drive on which I set up Time Machine easily enough. I would like to partition the drive somehow so that I could use half the drive to separately back up files on the Windows side, but I can't seem to do this. Any suggestions? Is this even possible with Time Machine?

    You can use Disk Utility to create two partitions. Choose FAT for the Windows partition and Mac OS Extended (Journaled) for the Mac. One limitation of FAT32 is that you can't write files bigger than 4GB. You can reformat this partition as NTFS within Vista, which will allow bigger file sizes, but you won't be able to write to this partition from within OS X without other utilities. Also, you can't write to the OS X partition from Vista without other software.
    Time Machine will not be able to back up the windows formatted drive partition.

  • Dynamically creating table and inserting rows and columns using JSP

    Hi,
    I'm using MySQL and JSP to create a web interface for my forms/tables. I want to create the table dynamically depending on the data in the table and for each particular record, and these values should be loaded on the form from the database as soon as the form loads.
    I also want to have a button which should add new rows dynamically, and the same for columns.
    How do I calculate the values across the rows on the forms?
    I'm new to JSP. Please point me in the right direction... any tutorials or code will be helpful.
    Any help is appreciated.
    Thanks
    Ayesha

    Write the code in this sequence:
    1. Use JDBC to run select count(*) from the table.
    2. Size your arrays from that count, e.g. String[] doc_no = new String[count];
    3. Use JDBC to run select * from the table with your condition, load the arrays, and then render the rows:
    <% while (rs4.next()) {
         doc_no[i]   = rs4.getString(2);
         date1[i]    = rs4.getString(3);
         doc_type[i] = rs4.getString(4);
         location[i] = rs4.getString(5);
         cheque[i]   = rs4.getString(6);
         rate[i]     = rs4.getInt(7);
         deb_qty[i]  = rs4.getInt(8);
         cre_qty[i]  = rs4.getInt(9);
         deb_amt[i]  = rs4.getInt(10);
         cre_amt[i]  = rs4.getInt(11);
         i++;
       }
       // rs4.close();
       for (int j = 0; j < count; j++) {
         System.out.println("Data count= " + j);
    %>
    <tr>
    <td width="15%"><font size="1"><%=doc_no[j] %></font></td>
    <td width="10%"><font size="1"><%=date1[j] %></font></td>
    <td width="12%"><font size="1"><%=doc_type[j] %></font></td>
    <td width="9%"><font size="1"><%=location[j] %></font></td>
    <td width="9%">
    <div align="left"><font size="1"><%=cheque[j] %></font></div>
    </td>
    <td width="8%">
    <div align="right"><font size="1"><%=deb_qty[j] %></font></div>
    </td>
    <td width="8%">
    <div align="right"><font size="1"><%=cre_qty[j] %></font></div>
    </td>
    <td width="9%">
    <div align="right"><font size="1"><%=deb_amt[j] %></font></div>
    </td>
    <td width="10%">
    <div align="right"><font size="1"><%=cre_amt[j] %></font></div>
    </td>
    </tr>
    <% } %>
    Write back if there is any specific problem.
    bye,
    Samir

  • Re-create Table for partitioning

    Hi, I would like to know if it is possible to add a partition to a table that is not partitioned.
    I think that I must re-create the table... is there another solution?
    Andrea (Italy)

    http://asktom.oracle.com/pls/ask/f?p=4950:8:15774480059312498807::NO::F4950_P8_DISPLAYID,F4950_P8_CRITERIA:4636779130376
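    As a rough sketch of the offline approach discussed in threads like that one (hypothetical names; indexes, grants and constraints still have to be rebuilt afterwards):
    CREATE TABLE orders_part
    PARTITION BY RANGE (order_date)
    ( PARTITION p2010 VALUES LESS THAN (DATE '2011-01-01'),
      PARTITION pmax  VALUES LESS THAN (MAXVALUE) )
    AS SELECT * FROM orders;
    RENAME orders TO orders_old;
    RENAME orders_part TO orders;
    For an online conversion without renaming, DBMS_REDEFINITION is the usual alternative.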

  • Create book file on basis of txt file with local paths?

    I would like to be able to create a book file in FrameMaker 7 or FrameMaker 9 on the basis of a .txt file that reads, for instance:
    c:/documents/job.fm
    c:/documents/15ss.fm
    c:/documents/sdf/job1.fm
    Preferrably I would like to be able to paste the lines into a prompt and have a script compiling the book file.
    Is it possible to write a script using FrameMaker's API which would do this trick? It is probably possible to use FrameScript, but I am not too keen on FrameScript and would rather avoid it.
    Who should one ask in order to have such a thing written?
    regards
    Bjørn

    StudioSm wrote:
    I would like to be able to create a book file in FrameMaker 7 or FrameMaker 9 on the basis of a  .txt file reading for instance:
    c:/documents/job.fm
    c:/documents/15ss.fm
    c:/documents/sdf/job1.fm
    Preferrably I would like to be able to paste the lines into a prompt and have a script compiling the book file.
    Is it possible to write a script using FrameMakers API which would do this trick? It is probably possible to use FrameScript but I am not too keen on FrameScript and would rather avoid it.
    Who should one ask in order to have such a thing written?
    regards
    Bjørn
    I started using FrameMaker in release 2.x, while I was using x-roff on UNIX; x-roff used a "makefile" to create books from independent chapters, doing all the pagination, numbering, etc. I suspected that FrameMaker's book file worked like that, but it wasn't until many years later that I met someone who'd worked with FrameMaker version 1.x, who confirmed my suspicion.
    Although I can't tell you how to do it, I'm certain that it can be programmed as you want, and that someone will post more information.
    HTH
    Regards,
    Peter
    Peter Gold
    KnowHow ProServices

  • Comparing incoming file name having timestamp with system time.

    Hi,
    The scenario is File to File.
    I have to read the incoming file name, which will contain a time stamp,
    and compare the time stamp with the system time.
    If the difference in time is within a specified window, send the file to the target location; else send a mail that the file was not placed in the target location.
    Please enlighten me on how this scenario can be done.
    Thanks,
    Akkasali.

    If you mean the actual time stamp on the file and not the file name, then see: http://www.sdn.sap.com/irj/scn/weblogs;jsessionid=(J2EE3417300)ID2044216150DB12410233167314983393End?blog=/pub/wlg/15154
    Else, if you mean the time stamp in the file name itself, then use Dynamic Configuration, read the file name, and do the further processing - /people/shabarish.vijayakumar/blog/2009/03/26/dynamic-configuration-vs-variable-substitution--the-ultimate-battle-for-the-file-name

  • Unique or primary key on timestamp with timezone

    Hi,
    I have been experimenting with a date column in a primary key, or actually I tried using a timestamp with time zone in a primary key.
    While researching whether there was a way to avoid ORA-02329, I found the following:
    K15> create table dumdum
      2    (datum date not null
      3    ,naamp varchar2( 30 ) not null);
    Table created.
    K15>
    K15> alter table dumdum
      2    add constraint d_pk
      3        primary key
      4          (datum, naamp)
      5    using index;
    Table altered.
    K15>
    K15> select ind.index_type
      2  from   user_indexes ind
      3  where  ind.index_name = 'D_PK';
    INDEX_TYPE
    NORMAL
    1 row selected.
    K15>
    K15> insert into dumdum
      2    (datum
      3    ,naamp )
      4  select sysdate - (level/1440)
      5  ,      'nomen nescio'
      6  from   dual
      7  connect by level < 1000
      8  ;
    999 rows created.
    K15>
    K15> analyze index d_pk validate structure;
    Index analyzed.
    K15> analyze table dumdum compute statistics;
    Table analyzed.
    K15>
    K15> select naamp
      2  from   dumdum
      3  where  datum > to_date('16-06-2011 15.46.16', 'dd-mm-yyyy hh24.mi.ss' )
      4 
    K15>
    For the last select statement I get the following "explain plan":
    SELECT STATEMENT  CHOOSE
              Cost: 2  Bytes: 247  Cardinality: 13       
         1 INDEX RANGE SCAN UNIQUE D_PK
                    Cost: 3  Bytes: 247  Cardinality: 13
    This behavior lived up to my expectations.
    Then, I tried this:
    K15> create table dumdum
      2    (datum date not null
      3    ,naamp varchar2( 30 ) not null);
    Table created.
    K15>
    K15> alter table dumdum
      2    add constraint d_pk
      3        primary key
      4          (datum, naamp)
      5    using index;
    Table altered.
    K15>
    K15> alter table dumdum
      2        modify datum timestamp(6) with time zone;
    Table altered.
    K15>
    K15> select ind.index_type
      2  from   user_indexes ind
      3  where  ind.index_name = 'D_PK';
    INDEX_TYPE
    NORMAL
    1 row selected.
    K15>
    K15> insert into dumdum
      2    (datum
      3    ,naamp )
      4  select sysdate - (level/1440)
      5  ,      'nomen nescio'
      6  from   dual
      7  connect by level < 1000
      8  ;
    999 rows created.
    K15>
    K15> analyze index d_pk validate structure;
    Index analyzed.
    K15> analyze table dumdum compute statistics;
    Table analyzed.
    K15>
    K15> select naamp
      2  from   dumdum
      3  where  datum > to_date('16-06-2011 15.46.16', 'dd-mm-yyyy hh24.mi.ss' )
      4
    K15>
    So, at first glance, the alter table statement to change the datatype from DATE to TIMESTAMP seems like a way of fooling Oracle. But the explain plan reveals a different story:
    SELECT STATEMENT  CHOOSE
              Cost: 4  Bytes: 1,25  Cardinality: 50       
         1 TABLE ACCESS FULL DUMDUM
                    Cost: 4  Bytes: 1,25  Cardinality: 50
    I was only fooling myself. :-0
    But I wasn't done with my research:
    K15> create table dumdum
      2    (datum timestamp(6) with time zone not null
      3    ,naamp varchar2( 30 ) not null);
    Table created.
    K15>
    K15> create unique index d_ind
      2      on dumdum
      3           (datum, naamp);
    Index created.
    K15>
    K15>
    K15> select ind.index_type
      2  from   user_indexes ind
      3  where  ind.index_name = 'D_IND';
    INDEX_TYPE
    FUNCTION-BASED NORMAL
    1 row selected.
    K15>
    K15> insert into dumdum
      2    (datum
      3    ,naamp )
      4  select systimestamp - (level/1440)
      5  ,      'nomen nescio'
      6  from   dual
      7  connect by level < 1000
      8  ;
    999 rows created.
    K15>
    K15> analyze index d_ind validate structure;
    Index analyzed.
    K15> analyze table dumdum compute statistics;
    Table analyzed.
    K15>
    K15> select naamp
      2  from   dumdum
      3  where  datum > to_date('16-06-2011 15.56.16', 'dd-mm-yyyy hh24.mi.ss' )
      4
    K15>
    Now, my explain plan looks fine:
    SELECT STATEMENT  CHOOSE
              Cost: 2  Bytes: 1,25  Cardinality: 50       
         1 INDEX RANGE SCAN UNIQUE D_IND
              Cost: 3  Bytes: 1,25  Cardinality: 50
    Why is Oracle so adamant about not allowing a timestamp with time zone in a unique key? And, given their position on the matter, where does their tolerance for a unique index come from?
    By the way, if I had a say in it, I would not allow anything that even remotely looks like a date to be part of a primary key, but that's another discussion.
    Thanks,
    Remco
    P.S. All this is on Oracle9i Enterprise Edition Release 9.2.0.8.0. Is it different on 10g or 11g?

    See if this helps. You can create a primary key on a TIMESTAMP WITH LOCAL TIME ZONE column.
    SQL>CREATE TABLE Mytimezone(Localtimezone TIMESTAMP WITH LOCAL TIME ZONE primary key, Location varchar2(20) );
    Table created.
    http://download.oracle.com/docs/cd/B19306_01/server.102/b14225/ch4datetime.htm#i1006169
    TIMESTAMP WITH LOCAL TIME ZONE Datatype
    TIMESTAMP WITH LOCAL TIME ZONE is another variant of TIMESTAMP. It differs from TIMESTAMP WITH TIME ZONE as follows: data stored in the database is normalized to the database time zone, and the time zone offset is not stored as part of the column data. When users retrieve the data, Oracle returns it in the users' local session time zone. The time zone offset is the difference (in hours and minutes) between local time and UTC (Coordinated Universal Time, formerly Greenwich Mean Time).
    Thanks
    http://swervedba.wordpress.com/
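    As a side note on the TIMESTAMP WITH TIME ZONE experiment above: the FUNCTION-BASED index Oracle built there is essentially an index on the UTC-normalized value, which you can also create explicitly. A sketch only (new table name made up, same shape as the test table):
    CREATE TABLE dumdum2
      (datum timestamp(6) with time zone not null
      ,naamp varchar2( 30 ) not null);
    CREATE UNIQUE INDEX d2_uq
        ON dumdum2 (SYS_EXTRACT_UTC(datum), naamp);
    This enforces uniqueness on the UTC value of datum plus naamp, much like the implicit function-based index in the test, but it is still not a declared primary key constraint.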

  • Partitioning on a table on the field TIMESTAMP WITH TIME ZONE

    Hi I have a very large size table which has grown to a size that we are not able to query it efficiently. We have decided to partition the table. But the issue is the table has a TIMESTAMP WITH TIME ZONE field and not DATE. I have found some links on the web which state this might cause an error. I am planning to create a temp table with the partition rules and at the same time copy data from the original one.
    using CREATE TABLE XYZ PARTITION BY RANGE (ABC) ( ---- Partition rules ------) NOLOGGING AS SELECT * FROM XYZ_ACTUAL where 1 = 2;
    Then if it works fine, I would rename the table with partitions to the actual name.
    Should all this be fine?
    The database is very critical. Hence the dilemma.

    Have you tried converting the timestamp with time zone to a character string as a partition key, possibly using an edit mask to control the timestamp components used?
    Your plan sounds OK to me - if you can get the partitioned table created - but I would test in a development environment first to see where the Law of Unintended Consequences might decide to manifest itself.
    Edited by: riedelme on Dec 8, 2009 9:13 AM
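    One variation on that plan, sketched with the hypothetical names from the post (test it on a development copy first): carry a plain TIMESTAMP derived from the TIMESTAMP WITH TIME ZONE column and range-partition on that, since SYS_EXTRACT_UTC() returns a plain TIMESTAMP.
    CREATE TABLE XYZ_PART
    NOLOGGING
    PARTITION BY RANGE (ABC_UTC)
    ( PARTITION P2009 VALUES LESS THAN (TIMESTAMP '2010-01-01 00:00:00'),
      PARTITION PMAX  VALUES LESS THAN (MAXVALUE) )
    AS
    SELECT T.*, SYS_EXTRACT_UTC(T.ABC) AS ABC_UTC
    FROM   XYZ_ACTUAL T
    WHERE  1 = 2;
    The original ABC column keeps its time-zone information; only the added ABC_UTC column drives the partitioning.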

  • Is it possible to create table with partition in compress mode

    Hi All,
    I want to create a table with the compress option, with partitions. When I create it with partitions the compression isn't enabled, but with normal table creation the compression option is enabled.
    My question is:
    can't we create a table with partitions/subpartitions in compress mode? Please help.
    Below is the code that i have used for table creation.
    CREATE TABLE temp
      TRADE_ID                    NUMBER,
      SRC_SYSTEM_ID               VARCHAR2(60 BYTE),
      SRC_TRADE_ID                VARCHAR2(60 BYTE),
      SRC_TRADE_VERSION           VARCHAR2(60 BYTE),
      ORIG_SRC_SYSTEM_ID          VARCHAR2(30 BYTE),
      TRADE_STATUS                VARCHAR2(60 BYTE),
      TRADE_TYPE                  VARCHAR2(60 BYTE),
      SECURITY_TYPE               VARCHAR2(60 BYTE),
      VOLUME                      NUMBER,
      ENTRY_DATE                  DATE,
        REASON                      VARCHAR2(255 BYTE),
    TABLESPACE data
    PCTUSED    0
    PCTFREE    10
    INITRANS   1
    MAXTRANS   255
    NOLOGGING
    COMPRESS
    NOCACHE
    PARALLEL (DEGREE 6 INSTANCES 1)
    MONITORING
    PARTITION BY RANGE (TRADE_DATE)
    SUBPARTITION BY LIST (SRC_SYSTEM_ID)
    SUBPARTITION TEMPLATE
      (SUBPARTITION SALES VALUES ('sales'),
       SUBPARTITION MAG VALUES ('MAG'),
       SUBPARTITION SPI VALUES ('SPI', 'SPIM', 'SPIIA'),
       SUBPARTITION FIS VALUES ('FIS'),
       SUBPARTITION GD VALUES ('GS'),
       SUBPARTITION ST VALUES ('ST'),
       SUBPARTITION KOR VALUES ('KOR'),
       SUBPARTITION BLR VALUES ('BLR'),
       SUBPARTITION SUT VALUES ('SUT'),
       SUBPARTITION RM VALUES ('RM'),
       SUBPARTITION DEFAULT VALUES (default)
    PARTITION RMS_TRADE_DLY_MAX VALUES LESS THAN (MAXVALUE)    
        LOGGING
            TABLESPACE data
         ( SUBPARTITION TS_MAX_SALES VALUES ('SALES')      TABLESPACE data,
        SUBPARTITION TS_MAX_MAG VALUES ('MAG')      TABLESPACE data,
        SUBPARTITION TS_MAX_SPI VALUES ('SPI', 'SPIM', 'SPIIA')      TABLESPACE data,
        SUBPARTITION TS_MAX_FIS VALUES ('FIS')      TABLESPACE data,
        SUBPARTITION TS_MAX_GS VALUES ('GS')      TABLESPACE data,
        SUBPARTITION TS_MAX_ST VALUES ('ST')      TABLESPACE data,
        SUBPARTITION TS_MAX_KOR VALUES ('KOR')      TABLESPACE data,
        SUBPARTITION TS_MAX_BLR VALUES ('BLR')      TABLESPACE data,
        SUBPARTITION TS_MAX_SUT VALUES ('SUT')      TABLESPACE data,
        SUBPARTITION TS_MAX_RM VALUES ('RM')      TABLESPACE data,
    SUBPARTITION TS_MAX_DEFAULT VALUES (default)      TABLESPACE data));
    Edited by: user11942774 on 8 Dec, 2011 5:17 AM

    user11942774 wrote:
    I want to create a table with the compress option, with partitions. When I create it with partitions the compression isn't enabled, but with normal table creation the compression option is enabled.
    First of all, your CREATE TABLE statement is full of syntax errors. Next time test it before posting - we don't want to spend time on fixing things not related to your question.
    Now, I bet you check the COMPRESSION value of the partitioned table the same way you do it for a non-partitioned table - in USER_TABLES - and therefore get wrong results. Since compression can be enabled at the individual partition level, you need to check COMPRESSION in USER_TAB_PARTITIONS:
    SQL> CREATE TABLE temp
      2  (
      3    TRADE_ID                    NUMBER,
      4    SRC_SYSTEM_ID               VARCHAR2(60 BYTE),
      5    SRC_TRADE_ID                VARCHAR2(60 BYTE),
      6    SRC_TRADE_VERSION           VARCHAR2(60 BYTE),
      7    ORIG_SRC_SYSTEM_ID          VARCHAR2(30 BYTE),
      8    TRADE_STATUS                VARCHAR2(60 BYTE),
      9    TRADE_TYPE                  VARCHAR2(60 BYTE),
    10    SECURITY_TYPE               VARCHAR2(60 BYTE),
    11    VOLUME                      NUMBER,
    12    ENTRY_DATE                  DATE,
    13      REASON                      VARCHAR2(255 BYTE),
    14    TRADE_DATE                  DATE
    15  )
    16  TABLESPACE users
    17  PCTUSED    0
    18  PCTFREE    10
    19  INITRANS   1
    20  MAXTRANS   255
    21  NOLOGGING
    22  COMPRESS
    23  NOCACHE
    24  PARALLEL (DEGREE 6 INSTANCES 1)
    25  MONITORING
    26  PARTITION BY RANGE (TRADE_DATE)
    27  SUBPARTITION BY LIST (SRC_SYSTEM_ID)
    28  SUBPARTITION TEMPLATE
    29    (SUBPARTITION SALES VALUES ('sales'),
    30     SUBPARTITION MAG VALUES ('MAG'),
    31     SUBPARTITION SPI VALUES ('SPI', 'SPIM', 'SPIIA'),
    32     SUBPARTITION FIS VALUES ('FIS'),
    33     SUBPARTITION GD VALUES ('GS'),
    34     SUBPARTITION ST VALUES ('ST'),
    35     SUBPARTITION KOR VALUES ('KOR'),
    36     SUBPARTITION BLR VALUES ('BLR'),
    37     SUBPARTITION SUT VALUES ('SUT'),
    38     SUBPARTITION RM VALUES ('RM'),
    39     SUBPARTITION DEFAULT_SUB VALUES (default)
    40    )  
    41  (  
    42   PARTITION RMS_TRADE_DLY_MAX VALUES LESS THAN (MAXVALUE)    
    43      LOGGING
    44          TABLESPACE users
    45       ( SUBPARTITION TS_MAX_SALES VALUES ('SALES')      TABLESPACE users,
    46      SUBPARTITION TS_MAX_MAG VALUES ('MAG')      TABLESPACE users,
    47      SUBPARTITION TS_MAX_SPI VALUES ('SPI', 'SPIM', 'SPIIA')      TABLESPACE users,
    48      SUBPARTITION TS_MAX_FIS VALUES ('FIS')      TABLESPACE users,
    49      SUBPARTITION TS_MAX_GS VALUES ('GS')      TABLESPACE users,
    50      SUBPARTITION TS_MAX_ST VALUES ('ST')      TABLESPACE users,
    51      SUBPARTITION TS_MAX_KOR VALUES ('KOR')      TABLESPACE users,
    52      SUBPARTITION TS_MAX_BLR VALUES ('BLR')      TABLESPACE users,
    53      SUBPARTITION TS_MAX_SUT VALUES ('SUT')      TABLESPACE users,
    54      SUBPARTITION TS_MAX_RM VALUES ('RM')      TABLESPACE users,
    55      SUBPARTITION TS_MAX_DEFAULT VALUES (default)      TABLESPACE users));
    Table created.
    SQL>
    SQL>
    SQL> SELECT  PARTITION_NAME,
      2          COMPRESSION
      3    FROM USER_TAB_PARTITIONS
      4    WHERE TABLE_NAME = 'TEMP'
      5  /
    PARTITION_NAME                 COMPRESS
    RMS_TRADE_DLY_MAX              ENABLED
    SQL> SELECT  COMPRESSION
      2    FROM USER_TABLES
      3    WHERE TABLE_NAME = 'TEMP'
      4  /
    COMPRESS
    SQL>
    SY.

  • DirectToXMLTypeMapping "create-tables" not generating XMLTYPE column type

    Can someone tell me how to code an XMLTYPE field such that "create-tables" will generate the XMLTYPE column and such that the IntegrityChecker will not throw an error.
    I am forced to run these alters after "create-tables" is run.
    ALTER TABLE XML_SYS_MSG drop column message;
    ALTER TABLE XML_SYS_MSG add (message XMLType);
    Snippets:
    <persistence...
    <property name="eclipselink.ddl-generation" value="create-tables" />
    </persistence>
    public class XmlMessageCustomizer implements DescriptorCustomizer {
    @Override
    public void customize(final ClassDescriptor descriptor) throws Exception {
    final DirectToXMLTypeMapping mapping = new DirectToXMLTypeMapping();
    descriptor.removeMappingForAttributeName("message");
    // name of the attribute
    mapping.setAttributeName("message");
    // IntegrityChecker requires uppercase for oracle
    // name of the column
    mapping.setFieldName("MESSAGE");
    descriptor.addMapping(mapping);
    }
    }
    @Entity(name = "XmlMessage")
    @Table(name = "XML_MSG")
    @Customizer(XmlMessageCustomizer.class)
    public class XmlMessage {
    @Id
    @GeneratedValue(strategy = GenerationType.AUTO)
    @Column(name = "ID")
    private long id;
    // @Column(columnDefinition = "XMLTYPE")
    // private String message;
    // ALTER TABLE XML_SYS_MSG drop column message;
    // ALTER TABLE XML_SYS_MSG add (message XMLType);
    private Document message;
    public XmlMessage() {
    }
    public long getId() {
    return id;
    }
    public void setId(final long id) {
    this.id = id;
    }
    public Document getMessage() {
    return message;
    }
    public void setMessage(final Document message) {
    this.message = message;
    }
    }
    Secondly, if I turn on the IntegrityChecker, it will fail:
    public class EnableIntegrityChecker implements SessionCustomizer {
    @Override
    public void customize(final Session session) throws Exception {
    session.getIntegrityChecker().checkDatabase();
    session.getIntegrityChecker().setShouldCatchExceptions(false);
    }
    }

    Adding:
         mapping.getField().setColumnDefinition("XMLTYPE");
    to the customizer should solve the problem.
    --Shaun
