Read Memory with "processmemory"

Hello to All 
I'm doing a project porting from 32-bit Delphi, Visual C # to a 64-bit 
in this project I had a procedure called "processmemory" who gave me the exact copy of the contents of a treeview component of an external application in a treeview component of my application 
I cannot find anything equivalent, nor a way to translate it from 32-bit Delphi to C#.
I even tried to use AutomationElement, but unfortunately it reads only the nodes that are currently expanded in the other application's component.
At this point I have two choices 
1) convert the code from Delphi to c # but valid for 64 bit 
2) use AutomationElement but also understand how to read the closed elements or find a way how to open all nodes of the external application and then read 
I'll post the code below processmemory
unit uProcessMemMgr;

{
  author: Michael Winter, [email protected]
  ver:    0.10, 2005-04-27

  desc:
    Provides access to memory of other processes currently running on the same
    machine. Memory can be allocated and deallocated inside the context of the
    other process. Read and write operations supported, not limited to portions
    of memory allocated by the object. Works for the own process, too.

  notes:
    You need one TProcessMemMgr object for each process.
    Freeing the TProcessMemMgr frees all memory allocated for the appropriate
    process.
    Please report any problems with this unit to the email address above.

  (The header text above was pasted without its comment braces; they are
  restored here so the unit compiles again.)
}

interface

uses
  Windows, Classes, SysUtils;

const
  // Granularity of the chunks requested from the target process; individual
  // allocations are carved out of these chunks by the managers below.
  MemMgrMemSize = 16*1024;

type
  // Abstract front end. Concrete subclasses in the implementation section
  // handle the own process, Win9x targets and WinNT targets; use the
  // CreateProcessMemMgr* factory functions to obtain the right one.
  TProcessMemMgr = class(TObject)
  public
    // Allocates Bytes bytes inside the target process; returns the address
    // as seen by the target process.
    function AllocMem(Bytes: Cardinal): Pointer; virtual; abstract;
    // Releases a pointer previously returned by AllocMem.
    procedure FreeMem(P: Pointer); virtual; abstract;
    // Copies Bytes bytes from Source (target process) into Dest (our process).
    procedure Read(Source: Pointer; var Dest; Bytes: Cardinal); virtual; abstract;
    // Reads a #0-terminated string from the target process.
    function ReadStr(Source: PChar): String; overload; virtual; abstract;
    // Reads exactly Bytes bytes from the target process as a string.
    function ReadStr(Source: Pointer; Bytes: Cardinal): String; overload;
    // Copies Bytes bytes from Source (our process) to Dest (target process).
    procedure Write(const Source; Dest: Pointer; Bytes: Cardinal); virtual; abstract;
    // Writes Str including its terminating #0 to Dest in the target process.
    procedure WriteStr(const Str: String; Dest: Pointer); virtual; abstract;
  end;

// Creates the memory manager matching the given process and platform.
function CreateProcessMemMgr(ProcessID: Cardinal): TProcessMemMgr;
// Convenience wrapper: resolves the window's owning process first.
function CreateProcessMemMgrForWnd(Wnd: HWND): TProcessMemMgr;

implementation
type
  // Raised when an allocation request cannot be satisfied.
  EProcessMemMgr = class(Exception);

  // Implementation for the calling process itself: plain heap allocations,
  // tracked in FMemList so the destructor can release them.
  TOwnProcessMemMgr = class(TProcessMemMgr)
  private
    FMemList: TThreadList; // pointers handed out by AllocMem
  public
    constructor Create;
    destructor Destroy; override;
    function AllocMem(Bytes: Cardinal): Pointer; override;
    procedure FreeMem(P: Pointer); override;
    procedure Read(Source: Pointer; var Dest; Bytes: Cardinal); override;
    function ReadStr(Source: PChar): String; override;
    procedure Write(const Source; Dest: Pointer; Bytes: Cardinal); override;
    procedure WriteStr(const Str: String; Dest: Pointer); override;
  end;

  // Common base for managers that talk to another process: keeps an open
  // process handle and a list of TMemRec blocks carved out of larger OS
  // allocations. Subclasses provide the platform-specific way to obtain
  // new chunks (NeedMoreMem).
  TForeignProcessMemMgr = class(TProcessMemMgr)
  private
    FProcess: THandle;     // opened with VM read/write/operation rights
    FMemList: TThreadList; // list of PMemRec, address-ordered within a group
  protected
    // Obtains at least Bytes bytes of fresh target-process memory and adds
    // it to FMemList as one free block.
    procedure NeedMoreMem(Bytes: Cardinal); virtual; abstract;
  public
    constructor Create(ProcessID: Cardinal);
    destructor Destroy; override;
    function AllocMem(Bytes: Cardinal): Pointer; override;
    procedure FreeMem(P: Pointer); override;
    procedure Read(Source: Pointer; var Dest; Bytes: Cardinal); override;
    function ReadStr(Source: PChar): String; override;
    procedure Write(const Source; Dest: Pointer; Bytes: Cardinal); override;
    procedure WriteStr(const Str: String; Dest: Pointer); override;
  end;

  // Win9x: new chunks come from shared file mappings (TSimpleSharedMem),
  // whose views are visible at the same address in every process there.
  TWin9xProcessMemMgr = class(TForeignProcessMemMgr)
  private
    FSharedList: TList; // owned TSimpleSharedMem segments
  protected
    procedure NeedMoreMem(Bytes: Cardinal); override;
  public
    constructor Create(ProcessID: Cardinal);
    destructor Destroy; override;
  end;

  // WinNT family: new chunks are committed directly inside the target
  // process with VirtualAllocEx.
  TWinNTProcessMemMgr = class(TForeignProcessMemMgr)
  private
    FAllocList: TList; // base addresses committed via VirtualAllocEx
  protected
    procedure NeedMoreMem(Bytes: Cardinal); override;
  public
    constructor Create(ProcessID: Cardinal);
    destructor Destroy; override;
  end;

  // One block in a TForeignProcessMemMgr block list.
  PMemRec = ^TMemRec;
  TMemRec = record
    Start: Pointer;  // address inside the target process
    Size: Cardinal;  // block size in bytes
    Group: Integer;  // index of the OS allocation the block belongs to
    Used: Boolean;   // false = free; free neighbours of a group get merged
  end;

  // Minimal wrapper around a named, pagefile-backed file mapping.
  TSimpleSharedMem = class(TObject)
  private
    FMapping: THandle;
    FBaseAddr: Pointer;
  public
    constructor Create(Size: Cardinal);
    destructor Destroy; override;
    property BaseAddr: Pointer read FBaseAddr;
  end;
{ Win95 doesn't export these functions, thus we have to import them dynamically: }
var
  VirtualAllocEx: function (hProcess: THandle; lpAddress: Pointer;
    dwSize, flAllocationType: DWORD; flProtect: DWORD): Pointer; stdcall = nil;
  // Fixed: VirtualFreeEx returns BOOL, not a pointer (see the Win32 API
  // reference); the original declaration made its result unusable.
  VirtualFreeEx: function(hProcess: THandle; lpAddress: Pointer;
    dwSize, dwFreeType: DWORD): BOOL; stdcall = nil;

// Resolves VirtualAllocEx/VirtualFreeEx from kernel32 on first use.
// Raises via RaiseLastWin32Error if they are unavailable (plain Win95).
procedure NeedVirtualAlloc;
var
  H: HINST;
begin
  // VirtualFreeEx is assigned last below, so it doubles as the
  // "already initialized" flag for both imports.
  if @VirtualFreeEx <> nil then exit;
  H := GetModuleHandle(kernel32);
  if H = 0 then
    RaiseLastWin32Error;
  @VirtualAllocEx := GetProcAddress(H, 'VirtualAllocEx');
  if @VirtualAllocEx = nil then
    RaiseLastWin32Error;
  @VirtualFreeEx := GetProcAddress(H, 'VirtualFreeEx');
  if @VirtualFreeEx = nil then
    RaiseLastWin32Error;
end;
{ TProcessMemMgr }

// Reads exactly Bytes bytes from the target process into a fresh string.
// NOTE(review): assumes SizeOf(Char) = 1 (pre-Unicode Delphi). On Delphi
// 2009+ String is UTF-16, so Bytes bytes would fill only half of Result —
// confirm the target compiler before reusing this in a 64-bit/Unicode port.
function TProcessMemMgr.ReadStr(Source: Pointer; Bytes: Cardinal): String;
begin
  SetLength(Result, Bytes);
  if Bytes > 0 then
    Read(Source, Result[1], Bytes);
end;
{ TOwnProcessMemMgr }

// Allocates Bytes zero-initialized bytes on our own heap and remembers the
// pointer so Destroy can release it.
function TOwnProcessMemMgr.AllocMem(Bytes: Cardinal): Pointer;
begin
  // BUG FIX: the original read "Result := AllocMem(Bytes);", which calls
  // this very method recursively until stack overflow. The commented-out
  // remainder of that line shows the intended call: the RTL allocator.
  Result := SysUtils.AllocMem(Bytes);
  FMemList.Add(Result);
end;
// Sets up the thread-safe list that tracks every pointer handed out by
// AllocMem so that Destroy can release them all.
constructor TOwnProcessMemMgr.Create;
begin
  inherited;
  FMemList := TThreadList.Create;
end;
// Releases every outstanding allocation, then the tracking list itself.
// FMemList may be nil if the constructor never ran (exception safety).
destructor TOwnProcessMemMgr.Destroy;
var
  List: TList;
  Index: Integer;
begin
  if Assigned(FMemList) then begin
    List := FMemList.LockList;
    try
      for Index := 0 to List.Count - 1 do
        System.FreeMem(List.Items[Index]);
    finally
      FMemList.UnlockList;
    end;
    FMemList.Free;
  end;
  inherited;
end;
// Forgets a pointer obtained from AllocMem and releases it.
procedure TOwnProcessMemMgr.FreeMem(P: Pointer);
begin
  FMemList.Remove(P);
  System.FreeMem(P);
end;
// "Reading" from our own process is a plain in-process memory copy.
procedure TOwnProcessMemMgr.Read(Source: Pointer; var Dest; Bytes: Cardinal);
begin
  System.Move(Source^, Dest, Bytes);
end;
// In our own process the #0-terminated string is directly addressable, so
// the PChar-to-String assignment does all the work.
function TOwnProcessMemMgr.ReadStr(Source: PChar): String;
begin
  Result := Source;
end;
// "Writing" to our own process is a plain in-process memory copy.
procedure TOwnProcessMemMgr.Write(const Source; Dest: Pointer; Bytes: Cardinal);
begin
  System.Move(Source, Dest^, Bytes);
end;
// Copies Str including the terminating #0 to Dest.
// NOTE(review): StrPCopy copies Chars; on Unicode compilers that is UTF-16
// data, while TForeignProcessMemMgr.ReadStr scans single bytes — confirm
// the intended string encoding when porting.
procedure TOwnProcessMemMgr.WriteStr(const Str: String; Dest: Pointer);
begin
  StrPCopy(Dest, Str);
end;
{ TForeignProcessMemMgr }

// First-fit allocator over the chunks obtained from the target process.
// Scans the block list for a free block large enough; if none is found it
// asks NeedMoreMem for another chunk and retries once. Raises
// EProcessMemMgr if even the fresh chunk cannot satisfy the request.
function TForeignProcessMemMgr.AllocMem(Bytes: Cardinal): Pointer;
var
  t: Integer;
  i: Integer;
  Rec, NewRec: PMemRec;
  Remain: Cardinal;
begin
  Result := nil;
  with FMemList.LockList do try
    // Pass 0 searches the existing blocks; pass 1 runs after NeedMoreMem
    // appended a fresh chunk at the end of the list.
    for t := 0 to 1 do begin
      for i := 0 to Count - 1 do begin
        Rec := Items[i];
        if not Rec^.Used and (Rec^.Size >= Bytes) then begin
          // Split the block: the front part becomes the allocation ...
          Remain := Rec^.Size - Bytes;
          Rec^.Size := Bytes;
          Rec^.Used := true;
          Result := Rec^.Start;
          // ... and any remainder stays behind as a free block directly
          // after it (same group, so FreeMem can merge them again).
          if Remain > 0 then begin
            New(NewRec);
            // BUG FIX (64 bit): the original cast the address through
            // Cardinal, truncating pointers above 4 GB. NativeUInt is
            // pointer-sized on both 32- and 64-bit compilers.
            NewRec^.Start := Pointer(NativeUInt(Result) + Bytes);
            NewRec^.Size := Remain;
            NewRec^.Group := Rec^.Group;
            NewRec^.Used := false;
            Insert(i + 1, NewRec);
          end;
          exit;
        end;
      end;
      NeedMoreMem(Bytes);
    end;
    raise EProcessMemMgr.Create('ProcessMemMgr.AllocMem: not enough memory');
  finally
    FMemList.UnlockList;
  end;
end;
// Opens the target process with the rights needed by Read/Write/AllocMem
// and prepares the (initially empty) block list.
constructor TForeignProcessMemMgr.Create(ProcessID: Cardinal);
const
  // Rights required by ReadProcessMemory, WriteProcessMemory and the
  // allocation calls made by the subclasses.
  RequiredAccess = PROCESS_VM_OPERATION or PROCESS_VM_READ or PROCESS_VM_WRITE;
begin
  inherited Create;
  FProcess := OpenProcess(RequiredAccess, false, ProcessID);
  if FProcess = 0 then
    RaiseLastWin32Error;
  FMemList := TThreadList.Create;
end;
// Disposes the bookkeeping records and closes the process handle. The
// target-process memory itself is released by the subclass destructors.
destructor TForeignProcessMemMgr.Destroy;
var
  List: TList;
  Index: Integer;
begin
  if Assigned(FMemList) then begin
    List := FMemList.LockList;
    try
      for Index := 0 to List.Count - 1 do
        Dispose(PMemRec(List.Items[Index]));
    finally
      FMemList.UnlockList;
    end;
    FMemList.Free;
  end;
  CloseHandle(FProcess);
  inherited;
end;
// Marks the block starting at P as free and coalesces it with every
// immediately following free block of the same allocation group. Blocks in
// front of P are not merged here; they pick P up when they are freed later.
procedure TForeignProcessMemMgr.FreeMem(P: Pointer);
var
  i, j: Integer;
  Rec, NextRec: PMemRec;
begin
  with FMemList.LockList do try
    for i := 0 to Count - 1 do begin
      Rec := Items[i];
      if Rec^.Start = P then begin
        Rec^.Used := false;
        // Absorb trailing free neighbours of the same group. The exits
        // below still run the finally block, so the list gets unlocked.
        j := i + 1;
        while j < Count do begin
          NextRec := Items[j];
          // Stop at the first block still in use ...
          if NextRec^.Used then exit;
          // ... or at a block from a different OS allocation (addresses of
          // different groups are not contiguous).
          if NextRec^.Group <> Rec^.Group then exit;
          inc(Rec^.Size, NextRec^.Size);
          Dispose(NextRec);
          Delete(j); // list shrinks, so j intentionally stays put
        end;
        exit;
      end;
    end;
    Assert(false, 'ProcessMemMgr.FreeMem: unknown pointer');
  finally
    FMemList.UnlockList;
  end;
end;
// Copies Bytes bytes from Source in the target process into Dest.
// Raises on failure (unmapped target pages, missing access rights, ...).
procedure TForeignProcessMemMgr.Read(Source: Pointer; var Dest; Bytes: Cardinal);
var
  // SIZE_T matches the lpNumberOfBytesRead out-parameter of
  // ReadProcessMemory on both 32- and 64-bit compilers; the original
  // Cardinal declaration does not compile for a 64-bit target.
  BytesRead: SIZE_T;
begin
  if not ReadProcessMemory(FProcess, Source, @Dest, Bytes, BytesRead) then
    RaiseLastWin32Error;
end;
// Reads a #0-terminated string from the target process, growing a local
// buffer one 4 KB page at a time. The first read only reaches up to the
// next page boundary, so we never touch a target page the string does not
// extend into.
// NOTE(review): the scan advances in Char units; this routine is only
// correct when SizeOf(Char) = 1 (pre-2009 Delphi) — revisit the buffer
// handling for a Unicode port.
function TForeignProcessMemMgr.ReadStr(Source: PChar): String;
var
  BytesRead: SIZE_T; // matches ReadProcessMemory on 32 and 64 bit
  OldSz, DeltaSz, NewSz: Integer;
  Buf: PChar;
  i: Integer;
  Found: Integer;    // index of the terminating #0; -1 while searching
begin
  Result := '';
  if Source = nil then exit;
  Buf := nil;
  OldSz := 0;
  // BUG FIX (64 bit): the original cast Source through Cardinal, which
  // truncates addresses above 4 GB; NativeUInt is pointer-sized.
  DeltaSz := $1000 - (NativeUInt(Source) and $FFF);
  Found := -1;
  try
    while Found < 0 do begin
      NewSz := OldSz + DeltaSz;
      System.ReallocMem(Buf, NewSz);
      if not ReadProcessMemory(FProcess, Source + OldSz, Buf + OldSz, DeltaSz, BytesRead) then
        RaiseLastWin32Error;
      // Only the newly fetched tail can contain the terminator.
      for i := OldSz to NewSz - 1 do begin
        if Buf[i] = #0 then begin
          Found := i;
          break;
        end;
      end;
      OldSz := NewSz;
      DeltaSz := $1000; // subsequent reads fetch whole pages
    end;
    SetLength(Result, Found);
    if Found > 0 then
      System.Move(Buf^, Result[1], Found);
  finally
    System.FreeMem(Buf);
  end;
end;
// Copies Bytes bytes from Source into the target process at Dest.
// Raises on failure.
procedure TForeignProcessMemMgr.Write(const Source; Dest: Pointer; Bytes: Cardinal);
var
  // SIZE_T matches the lpNumberOfBytesWritten out-parameter of
  // WriteProcessMemory on both 32- and 64-bit compilers; the original
  // Cardinal declaration does not compile for a 64-bit target.
  BytesWritten: SIZE_T;
begin
  if not WriteProcessMemory(FProcess, Dest, @Source, Bytes, BytesWritten) then
    RaiseLastWin32Error;
end;
// Writes Str plus its terminating #0 into the target process at Dest.
procedure TForeignProcessMemMgr.WriteStr(const Str: String; Dest: Pointer);
begin
  // Multiply by SizeOf(Char) so the full character data is copied on
  // Unicode compilers too (where a Char is 2 bytes and the original
  // Length(Str) + 1 only copied half the string). On ANSI compilers
  // SizeOf(Char) = 1 and this is byte-identical to the original.
  Write(PChar(Str)^, Dest, (Length(Str) + 1) * SizeOf(Char));
end;
{ TWin9xProcessMemMgr }

// Win9x variant: target-process memory is provided through shared file
// mappings (see NeedMoreMem), tracked in FSharedList.
constructor TWin9xProcessMemMgr.Create(ProcessID: Cardinal);
begin
  inherited;
  FSharedList := TList.Create; // owns the TSimpleSharedMem segments
end;
// Releases the shared-memory segments (in reverse creation order), then the
// list itself. FSharedList may be nil if the constructor never completed.
destructor TWin9xProcessMemMgr.Destroy;
var
  Index: Integer;
begin
  if FSharedList <> nil then begin
    for Index := FSharedList.Count - 1 downto 0 do
      TSimpleSharedMem(FSharedList[Index]).Free;
    FSharedList.Free;
  end;
  inherited;
end;
// Creates a new shared-memory segment and registers it in the block list as
// one free block. On Win9x a MapViewOfFile view is usable under the same
// address by the target process, which is what makes this approach work.
procedure TWin9xProcessMemMgr.NeedMoreMem(Bytes: Cardinal);
var
  GroupIx: Integer;
  Segment: TSimpleSharedMem;
  Block: PMemRec;
begin
  // Request at least one full chunk; larger requests are rounded up to a
  // whole number of 4 KB pages.
  if Bytes < MemMgrMemSize then
    Bytes := MemMgrMemSize
  else
    Bytes := (Bytes + $FFF) and not $FFF;
  Segment := TSimpleSharedMem.Create(Bytes);
  GroupIx := FSharedList.Add(Segment);
  New(Block);
  Block^.Start := Segment.BaseAddr;
  Block^.Size := Bytes;
  Block^.Group := GroupIx;
  Block^.Used := false;
  FMemList.Add(Block);
end;
{ TWinNTProcessMemMgr }

// WinNT variant: memory is committed directly in the target process via
// VirtualAllocEx, which is resolved dynamically first (NeedVirtualAlloc).
constructor TWinNTProcessMemMgr.Create(ProcessID: Cardinal);
begin
  inherited;
  NeedVirtualAlloc;
  FAllocList := TList.Create; // base addresses returned by VirtualAllocEx
end;
// Returns every chunk committed in the target process, then frees the list.
// FAllocList may be nil if the constructor never completed.
destructor TWinNTProcessMemMgr.Destroy;
var
  Index: Integer;
begin
  if FAllocList <> nil then begin
    for Index := 0 to FAllocList.Count - 1 do
      VirtualFreeEx(FProcess, FAllocList[Index], 0, MEM_RELEASE);
    FAllocList.Free;
  end;
  inherited;
end;
// Commits a new chunk of memory inside the target process and registers it
// in the block list as one free block.
procedure TWinNTProcessMemMgr.NeedMoreMem(Bytes: Cardinal);
var
  Ix: Integer;
  Alloc: Pointer;
  Rec: PMemRec;
begin
  // Request at least one full chunk; larger requests are rounded up to a
  // whole number of 4 KB pages.
  if Bytes < MemMgrMemSize then
    Bytes := MemMgrMemSize
  else
    Bytes := (Bytes + $FFF) and not $FFF;
  Ix := FAllocList.Count;
  // BUG FIX: the original passed MemMgrMemSize here even when Bytes was
  // larger, yet recorded Rec^.Size := Bytes below — a request bigger than
  // one chunk then handed out uncommitted target memory. Pass the rounded
  // Bytes, as the Win9x counterpart already does.
  Alloc := VirtualAllocEx(FProcess, nil, Bytes, MEM_COMMIT, PAGE_READWRITE);
  if Alloc = nil then RaiseLastWin32Error;
  FAllocList.Add(Alloc);
  New(Rec);
  Rec^.Start := Alloc;
  Rec^.Size := Bytes;
  Rec^.Group := Ix;
  Rec^.Used := false;
  FMemList.Add(Rec);
end;
{ TSimpleSharedMem }

// Creates a named, pagefile-backed file mapping of Size bytes and maps a
// read/write view of it into our address space.
constructor TSimpleSharedMem.Create(Size: Cardinal);
var
  PerfCount: Int64;
  UniqueName: String;
begin
  inherited Create;
  // Use the performance counter to build a mapping name that is unlikely
  // to collide with any other mapping on this machine.
  QueryPerformanceCounter(PerfCount);
  UniqueName := 'mw_pmm_' + IntToHex(PerfCount, 16);
  // BUG FIX (64 bit): INVALID_HANDLE_VALUE requests a pagefile-backed
  // mapping. The original Cardinal(-1) only equals INVALID_HANDLE_VALUE on
  // 32 bit; with a 64-bit THandle the cast yields $FFFFFFFF instead.
  FMapping := CreateFileMapping(INVALID_HANDLE_VALUE, nil, PAGE_READWRITE, 0, Size, PChar(UniqueName));
  if FMapping = 0 then
    RaiseLastWin32Error;
  FBaseAddr := MapViewOfFile(FMapping, FILE_MAP_WRITE, 0, 0, Size);
  if not Assigned(FBaseAddr) then
    RaiseLastWin32Error;
end;
// Unmaps the view and closes the mapping handle.
destructor TSimpleSharedMem.Destroy;
begin
  UnmapViewOfFile(FBaseAddr);
  CloseHandle(FMapping);
  inherited;
end;
// Factory: picks the manager implementation matching the target process.
// The own process needs no inter-process calls at all; for foreign
// processes the platform decides between VirtualAllocEx (NT line) and
// shared file mappings (9x line).
function CreateProcessMemMgr(ProcessID: Cardinal): TProcessMemMgr;
begin
  if ProcessID = GetCurrentProcessId then
    Result := TOwnProcessMemMgr.Create
  else if Win32Platform = VER_PLATFORM_WIN32_NT then
    Result := TWinNTProcessMemMgr.Create(ProcessID)
  else
    Result := TWin9xProcessMemMgr.Create(ProcessID);
end;
// Factory: resolves the process owning the given window, then delegates to
// CreateProcessMemMgr. Raises EProcessMemMgr for an invalid window handle.
function CreateProcessMemMgrForWnd(Wnd: HWND): TProcessMemMgr;
var
  PID: Cardinal;
begin
  PID := 0;
  // GetWindowThreadProcessId returns 0 when Wnd is not a valid window; the
  // original ignored that and let OpenProcess fail later on PID 0 with a
  // misleading error. Fail early with a meaningful message instead.
  if GetWindowThreadProcessId(Wnd, @PID) = 0 then
    raise EProcessMemMgr.Create('CreateProcessMemMgrForWnd: invalid window handle');
  Result := CreateProcessMemMgr(PID);
end;
end.
Wacky Teseo

Hi Wacky Teseo,
This forum is for discussing Windows Forms issues. Your question relates to TreeNode in WinForms, but conversion between Delphi and C# is not supported here.
You could consider posting it at the Delphi forum
for some solutions in Delphi. Thanks!
Best regards,
Youjun Tang
We are trying to better understand customer views on social support experience, so your participation in this interview project would be greatly appreciated if you have time. Thanks for helping make community forums a great place.
Click
HERE to participate in the survey.

Similar Messages

  • MIXING ORIGINAL APPLE MEMORY WITH MEMORY FROM ANOTHER MANUFACTURER

    Hi everyone,
    I'm about to hop on the mac train and buy a macbook 2.16GHz( the recent previous black model)
    It's coming with 1GB of memory and I wanted to buy 2GB to add to it. I found a good deal for a OWC 2GB DDR2 PC2-5300 667MHz DIMM(it's one stick of 2GB).
    Is it compatible to mix different memory makes and if so, is it also optimal? Would the optimizing power be the same if I used all the memory from one manufacturer or it makes no difference? Thanks.

    bdkjones wrote:
    As long as the memory is by a major manufacturer (Samsung, Hynix, Micron, etc) it should not matter.
    You do get a slight advantage by using 2 of the same capacity RAM modules. (i.e. 2x1GB chips or 2x2GB chips). In practice, however, this performance gain really isn't noticeable. You have to run a memory benchmark test to see the difference.
    I remember there was this supposed IT guy who claimed that different brands of memory "fight it out". Some of the answers were absolutely hilarious. Seriously, there's nothing that consistent about a single brand of memory. There are always variations, but I wouldn't worry about a thing provided the memory actually meets its published specs. You get variation quality with memory modules that aren't fully-branded by one of the major memory IC manufacturers, but there are some reputable brands out there (OCZ, PNY, Patriot, etc).
    I have an iBook G4 1.42. For two+ years I've been running the permanent Samsung memory with a 1 GB Micron PC2700 module. I've never encountered anything that suggests that there is a problem with it.
    I've also read the Intel specs on what the chipset does with memory sized identically. Apparently it can do that very same thing (memory interleaving) even if the sizes aren't matched. However - it'll only do that for up to the size of the smaller module, and the remainder of the larger module isn't interleaved.

  • Pre-loading Oracle text in memory with Oracle 12c

    There is a white paper from Roger Ford that explains how to load the Oracle index in memory : http://www.oracle.com/technetwork/database/enterprise-edition/mem-load-082296.html
    In our application, Oracle 12c, we are indexing a big XML field (which is stored as XMLType with storage secure file) with the PATH_SECTION_GROUP. If I don't load the I table (DR$..$I) into memory using the technique explained in the white paper then I cannot have decent performance (and especially not predictable performance, it looks like if the blocks from the TOKEN_INFO columns are not memory then performance can fall sharply)
    But after migrating to oracle 12c, I got a different problem, which I can reproduce: when I create the index it is relatively small (as seen with ctx_report.index_size) and by applying the technique from the whitepaper, I can pin the DR$ I table into memory. But as soon as I do a ctx_ddl.optimize_index('Index','REBUILD') the size becomes much bigger and I can't pin the index in memory. Not sure if it is bug or not.
    What I found as work-around is to build the index with the following storage options:
    ctx_ddl.create_preference('TEST_STO','BASIC_STORAGE');
    ctx_ddl.set_attribute ('TEST_STO', 'BIG_IO', 'YES' );
    ctx_ddl.set_attribute ('TEST_STO', 'SEPARATE_OFFSETS', 'NO' );
    so that the token_info column will be stored in a secure file. Then I can change the storage of that column to put it in the keep buffer cache, and write a procedure to read the LOB so that it will be loaded in the keep cache. The size of the LOB column is more or less the same as when creating the index without the BIG_IO option but it remains constant even after a ctx_dll.optimize_index. The procedure to read the LOB and to load it into the cache is very similar to the loaddollarR procedure from the white paper.
    Because of the SDATA section, there is a new DR table (S table) and an IOT on top of it. This is not documented in the white paper (the white paper was written for Oracle 10g). In my case this DR$ S table is much used, and the IOT also, but putting it in the keep cache is not as important as the token_info column of the DR I table. A final note: doing SEPARATE_OFFSETS = 'YES' was very bad in my case, the combined size of the two columns is much bigger than having only the TOKEN_INFO column and both columns are read.
    Here is an example on how to reproduce the problem with the size increasing when doing ctx_optimize
    1. create the table
    drop table test;
    CREATE TABLE test
    (ID NUMBER(9,0) NOT NULL ENABLE,
    XML_DATA XMLTYPE
    XMLTYPE COLUMN XML_DATA STORE AS SECUREFILE BINARY XML (tablespace users disable storage in row);
    2. insert a few records
    insert into test values(1,'<Book><TITLE>Tale of Two Cities</TITLE>It was the best of times.<Author NAME="Charles Dickens"> Born in England in the town, Stratford_Upon_Avon </Author></Book>');
    insert into test values(2,'<BOOK><TITLE>The House of Mirth</TITLE>Written in 1905<Author NAME="Edith Wharton"> Wharton was born to George Frederic Jones and Lucretia Stevens Rhinelander in New York City.</Author></BOOK>');
    insert into test values(3,'<BOOK><TITLE>Age of innocence</TITLE>She got a prize for it.<Author NAME="Edith Wharton"> Wharton was born to George Frederic Jones and Lucretia Stevens Rhinelander in New York City.</Author></BOOK>');
    3. create the text index
    drop index i_test;
      exec ctx_ddl.create_section_group('TEST_SGP','PATH_SECTION_GROUP');
    begin
      CTX_DDL.ADD_SDATA_SECTION(group_name => 'TEST_SGP', 
                                section_name => 'SData_02',
                                tag => 'SData_02',
                                datatype => 'varchar2');
    end;
    exec ctx_ddl.create_preference('TEST_STO','BASIC_STORAGE');
    exec  ctx_ddl.set_attribute('TEST_STO','I_TABLE_CLAUSE','tablespace USERS storage (initial 64K)');
    exec  ctx_ddl.set_attribute('TEST_STO','I_INDEX_CLAUSE','tablespace USERS storage (initial 64K) compress 2');
    exec  ctx_ddl.set_attribute ('TEST_STO', 'BIG_IO', 'NO' );
    exec  ctx_ddl.set_attribute ('TEST_STO', 'SEPARATE_OFFSETS', 'NO' );
    create index I_TEST
      on TEST (XML_DATA)
      indextype is ctxsys.context
      parameters('
        section group   "TEST_SGP"
        storage         "TEST_STO"
      ') parallel 2;
    4. check the index size
    select ctx_report.index_size('I_TEST') from dual;
    it says :
    TOTALS FOR INDEX TEST.I_TEST
    TOTAL BLOCKS ALLOCATED:                                                104
    TOTAL BLOCKS USED:                                                      72
    TOTAL BYTES ALLOCATED:                                 851,968 (832.00 KB)
    TOTAL BYTES USED:                                      589,824 (576.00 KB)
    4. optimize the index
    exec ctx_ddl.optimize_index('I_TEST','REBUILD');
    and now recompute the size, it says
    TOTALS FOR INDEX TEST.I_TEST
    TOTAL BLOCKS ALLOCATED:                                               1112
    TOTAL BLOCKS USED:                                                    1080
    TOTAL BYTES ALLOCATED:                                 9,109,504 (8.69 MB)
    TOTAL BYTES USED:                                      8,847,360 (8.44 MB)
    which shows that it went from 576KB to 8.44MB. With a big index the difference is not so big, but still from 14G to 19G.
    5. Workaround: use the BIG_IO option, so that the token_info column of the DR$ I table will be stored in a secure file and the size will stay relatively small. Then you can load this column in the cache using a procedure similar to
    alter table DR$I_TEST$I storage (buffer_pool keep);
    alter table dr$i_test$i modify lob(token_info) (cache storage (buffer_pool keep));
    rem: now we must read the lob so that it will be loaded in the keep buffer pool, use the procedure below
    create or replace procedure loadTokenInfo is
      type c_type is ref cursor;
      c2 c_type;
      s varchar2(2000);
      b blob;
      buff varchar2(100);
      siz number;
      off number;
      cntr number;
    begin
        s := 'select token_info from  DR$i_test$I';
        open c2 for s;
        loop
           fetch c2 into b;
           exit when c2%notfound;
           siz := 10;
           off := 1;
           cntr := 0;
           if dbms_lob.getlength(b) > 0 then
             begin
               loop
                 dbms_lob.read(b, siz, off, buff);
                 cntr := cntr + 1;
                 off := off + 4096;
               end loop;
             exception when no_data_found then
               if cntr > 0 then
                 dbms_output.put_line('4K chunks fetched: '||cntr);
               end if;
             end;
           end if;
        end loop;
    end;
    Rgds, Pierre

    I have been working a lot on that issue recently, I can give some more info.
    First I totally agree with you, I don't like to use the keep_pool and I would love to avoid it. On the other hand, we have a specific use case : 90% of the activity in the DB is done by queuing and dbms_scheduler jobs where response time does not matter. All those processes are probably filling the buffer cache. We have a customer facing application that uses the text index to search the database : performance is critical for them.
    What kind of performance do you have with your application ?
    In my case, I have learned the hard way that having the index in memory (the DR$I table in fact) is the key : if it is not, then performance is poor. I find it reasonable to pin the DR$I table in memory and if you look at competitors this is what they do. With MongoDB they explicitly says that the index must be in memory. With elasticsearch, they use JVM's that are also in memory. And effectively, if you look at the awr report, you will see that Oracle is continuously accessing the DR$I table, there is a SQL similar to
    SELECT /*+ DYNAMIC_SAMPLING(0) INDEX(i) */    
    TOKEN_FIRST, TOKEN_LAST, TOKEN_COUNT, ROWID    
    FROM DR$idxname$I
    WHERE TOKEN_TEXT = :word AND TOKEN_TYPE = :wtype    
    ORDER BY TOKEN_TEXT,  TOKEN_TYPE,  TOKEN_FIRST
    which is continuously done.
    I think that the algorithm used by Oracle to keep blocks in cache is too complex. A just realized that in 12.1.0.2 (was released last week) there is finally a "killer" functionality, the in-memory parameters, with which you can pin tables or columns in memory with compression, etc. this looks ideal for the text index, I hope that R. Ford will finally update his white paper :-)
    But my other problem was that the optimize_index in REBUILD mode caused the DR$I table to double in size : it seems crazy that this was closed as not a bug but it was and I can't do anything about it. It is a bug in my opinion, because the create index command and "alter index rebuild" command both result in a much smaller index, so why would the guys that developped the optimize function (is it another team, using another algorithm ?) make the index two times bigger ?
    And for that the track I have been following is to put the index in a 16K tablespace : in this case the space used by the index remains more or less flat (increases but much more reasonably). The difficulty here is to pin the index in memory because the trick of R. Ford was not working anymore.
    What worked:
    first set the keep_pool to zero and set the db_16k_cache_size to instead. Then change the storage preference to make sure that everything you want to cache (mostly the DR$I) table come in the tablespace with the non-standard block size of 16k.
    Then comes the tricky part : the pre-loading of the data in the buffer cache. The problem is that with Oracle 12c, Oracle will use direct_path_read for FTS which basically means that it bypasses the cache and read directory from file to the PGA !!! There is an event to avoid that, I was lucky to find it on a blog (I can't remember which, sorry for the credit).
    I ended-up doing that. the events to 10949 is to avoid the direct path reads issue.
    alter session set events '10949 trace name context forever, level 1';
    alter table DR#idxname0001$I cache;
    alter table DR#idxname0002$I cache;
    alter table DR#idxname0003$I cache;
    SELECT /*+ FULL(ITAB) CACHE(ITAB) */ SUM(TOKEN_COUNT),  SUM(LENGTH(TOKEN_INFO)) FROM DR#idxname0001$I;
    SELECT /*+ FULL(ITAB) CACHE(ITAB) */ SUM(TOKEN_COUNT),  SUM(LENGTH(TOKEN_INFO)) FROM DR#idxname0002$I;
    SELECT /*+ FULL(ITAB) CACHE(ITAB) */ SUM(TOKEN_COUNT),  SUM(LENGTH(TOKEN_INFO)) FROM DR#idxname0003$I;
    SELECT /*+ INDEX(ITAB) CACHE(ITAB) */  SUM(LENGTH(TOKEN_TEXT)) FROM DR#idxname0001$I ITAB;
    SELECT /*+ INDEX(ITAB) CACHE(ITAB) */  SUM(LENGTH(TOKEN_TEXT)) FROM DR#idxname0002$I ITAB;
    SELECT /*+ INDEX(ITAB) CACHE(ITAB) */  SUM(LENGTH(TOKEN_TEXT)) FROM DR#idxname0003$I ITAB;
    It worked. With a big relief I expected to take some time out, but there was a last surprise. The command
    exec ctx_ddl.optimize_index(idx_name=>'idxname',part_name=>'partname',optlevel=>'REBUILD');
    gave the following
    ERROR at line 1:
    ORA-20000: Oracle Text error:
    DRG-50857: oracle error in drftoptrebxch
    ORA-14097: column type or size mismatch in ALTER TABLE EXCHANGE PARTITION
    ORA-06512: at "CTXSYS.DRUE", line 160
    ORA-06512: at "CTXSYS.CTX_DDL", line 1141
    ORA-06512: at line 1
    Which is very much exactly described in a metalink note 1645634.1 but in the case of a non-partitioned index. The work-around given seemed very logical but it did not work in the case of a partitioned index. After experimenting, I found out that the bug occurs when the partitioned index is created with  dbms_pclxutil.build_part_index procedure (this enables  enables intra-partition parallelism in the index creation process). This is a very annoying and stupid bug, maybe there is a work-around, but did not find it on metalink
    Other points of attention with the text index creation (stuff that surprised me at first !) ;
    - if you use the dbms_pclxutil package, then the ctx_output logging does not work, because the index is created immediately and then populated in the background via dbms_jobs.
    - this in combination with the fact that if you are on a RAC, you won't see any activity on the box can be very frightening : this is because oracle can choose to start the workers on the other node.
    I understand much better how the text indexing works, I think it is a great technology which can scale via partitioning. But like always the design of the application is crucial, most of our problems come from the fact that we did not choose the right sectioning (we choosed PATH_SECTION_GROUP while XML_SECTION_GROUP is so much better IMO). Maybe later I can convince the dev to change the sectionining, especially because SDATA and MDATA section are not supported with PATCH_SECTION_GROUP (although it seems to work, even though we had one occurence of a bad result linked to the existence of SDATA in the index definition). Also the whole problematic of mixed structured/unstructured searches is completly tackled if one use XML_SECTION_GROUP with MDATA/SDATA (but of course the app was written for Oracle 10...)
    Regards, Pierre

  • Read memory data [ANRITSU MS9710B GPIB]

    Hi,
    I'm trying to get spectrum form ANRITSU MS9710B OSA via GPIB link. I am using the VIs provided by ANRITSU, there is no problem to detect the device and communicate with it.
    I got a problem when it comes to get the values from memory with "Read memory data.vi"; in fact, it detects the start wavelenght but stops the acqusition at the first sample.
    Does anyone already had this problem ?
    I can read this warning from the vi : 1073676294 occured at VISA read
    Possible reasons: VISAHex 0xFFF0006). The number of bytes transfered is equal to the requested input count.More data might be available.
    Thanks.

    Status Code 1073676294 (Hex 0x3FFF0006) maps to a status report. The status code indicates The Number of Bytes Transferred is Equal to the Input Count. More data might be available. This is not an error message, but a status report indicating the value written to the input buffer has been successfully read and is now empty.
    Error codes are assigned negative numbers and status codes are assigned positive numbers. You can ignore an error if the error status boolean is false. There are a variety of positive status codes such as this one that the VISA driver can return indicating the status after VISA operations.
    Because there are applications where it is necessary to check status codes, they are returned in the error cluster without the error boolean set.

  • Can u buy additional memory with a itunes card

    can you buy additional memory with a itunes gift card

    Buy more space for iCloud? Yes.
    Have an 8GB iPod touch and want to add more room? Nope.

  • I usually have 30 tabs opened, and the memory rises in a unexpected way, so that when it reaches 1.2GB, firefox crashes. Also If, for e.g. firefox is consuming 700MB of memory (with a lot of tabs opened), and if i close those tabs and leave just 2 or 3 op

    I usually have 30 tabs opened, and the memory rises in a unexpected way, so that when it reaches 1.2GB, firefox crashes. Also If, for e.g. firefox is consuming 700MB of memory (with a lot of tabs opened), and if i close those tabs and leave just 2 or 3 opened, the memory usage does not drop and remains at the 700MB. Thanks for your attention. Regards, Ricardo
    == Crash ID(s) ==
    bp-765e3c37-0edc-4ed6-a4e9-7ed612100526

    I have the same sort of problem with 30-40 tabs, with memory usage growing until crash. It seems a lot worse after restarting from standby mode on Vista - it's often highly unresponsive for a while, then crashes.
    I have a fair few plug-ins running, one of the benefits of Firefox.
    Is their a way I can log memory use of parts of Firefox, particularly the plug-ins?

  • Automatic creation of meter reading order with RR 01 after move in

    Hi Experts,
    I am working with DM module of SAP ISU. Currently I am facing a problem that after move-in a periodic order with RR 01 is created automatically by the system.
    Ex: Move in date is 01.01.2010. Monthly Portion is used with Schedule record 01.01.2010, 01.02.2010, 01.03.2010, 01.04.2010 and so on.
    If current system date is 20.07.2010 then Meter reading order is created automatically by the system for sch MRD 01.08.2010.
    Please let me know how to overcome this problem
    Thanks in advance.

    Thanks for your reply but I have checked the SPRO setting; the suggested check box is already unchecked.
    I believe the setting you have suggested is meant for default meter reading during move in, but my problem is automatic creation of meter reading order with reason 01 during move in

  • How to open Adobe Reader DC with Tools Closed

    I do NOT like Adobe Reader opening with the right column expanded.
    Also in all previous versions I could save my login for protected pdf files and when I open them back up they require a login again.  This is NOT how I want to work with Adobe Reader.
    Adobe Reader XI was so much better.

    Before the DC version we were able to save our passwords from University of Phoenix and when opening the document saved to our computer it would unlock automatically.  This new version DC will not allow this even when you say Yes.
    The Right Column in previous versions would allow to disable until requested to appear this version it opens regardless.
    Just my opinion here  Adobe Failed at customer satisfaction on this UPGRADE.
    Regards, an unhappy Adobe user.

  • I have Adobe Reader XI with the package that allows me to send pdfs and convert pdfs to Word. When I open a pdf and try to search the search shows no matches even though the word I am searching for is in the document. Any suggestions how to search?

    I have Adobe Reader XI with the package that allows me to send pdfs and convert pdfs to Word. When I open a pdf and try to search the search shows no matches even though the word I am searching for is in the document. Any suggestions how to search?

    Once again, my thanks for your assistance. If I may impose on your generosity one more time, if I understand you correctly if I create a document on Word or WordPerfect, print it and scan it to create a pdf, the search function will not work. But, if I create the pdf document in Word the search function will work. Unfortunately, I am not sure how I create a pdf document in Word other than by printing it and scanning it. Could you please explain.
    William B. Kohn, Esq.
    General Counsel
    Paul V. Profeta & Associates, Inc.
    769 Northfield Avenue
    Suite 250
    West Orange, New Jersey 07052
    (973) 325 - 1300
    (973) 325 - 0376 (Facsimile)
    [email protected]<mailto:[email protected]>

  • Can I hide the Adobe Reader panel with "Export, Create and Edit PDF and Send and Store Files"? I don't use it and it takes up space

    Can I hide the Adobe Reader panel with "Export, Create and Edit PDF and Send and Store Files"? I don't use it and it takes up space

    If you just mean, can I close it, sure. In Reader XI click the Tools button to close or reopen it.

  • How to Write BUFFER & Read TEXT with Encrypt file ?

    I'm using Windows Phone 8.1 RT.
    I have a issue :
    - I write a BUFFER encrypted to file. After, I read the file as TEXT. It throws an exception: No mapping for the Unicode character exists in the target multi-byte code page. (//ERROR 2)
    - I write a TEXT encrypted to file. After, I read the file as a BUFFER. It throws an exception: The supplied user buffer is not valid for the requested operation. (//ERROR 1)
    Code Write Buffer & Read Text.
    //Write Textstring msg = EncryptText.Text;
    //ERROR 1 - Use 1 or 2
    await WriteTextAsync(this.file, msg);
    //ERROR 1
    //Read Buffer
    string msg;
    //ERROR 1 - Use 1 or 2
    IBuffer buffer = await ReadBufferAsync(this.file);
    StreamReader stream = new StreamReader(buffer.AsStream());
    msg = stream.ReadToEnd();
    //ERROR 1
    Code Encrypt-Decypt.
    public static string EncryptString(string msg)
                var bufferMsg = CryptographicBuffer.ConvertStringToBinary(msg, BinaryStringEncoding.Utf8);
                var bufferMsgEncrypted = Encrypt(bufferMsg);
                var msgEncrypted = CryptographicBuffer.EncodeToBase64String(bufferMsgEncrypted);
                return msgEncrypted;
            }public static IAsyncAction WriteTextAsync(IStorageFile file, string msg)
                return FileIO.WriteTextAsync(file, EncryptString(msg));
    public static IBuffer Decrypt(IBuffer bufferMsg)
                var key = CreateKey(KEY);
                var aes = SymmetricKeyAlgorithmProvider.OpenAlgorithm(SymmetricAlgorithmNames.AesCbcPkcs7);
                var symetricKey = aes.CreateSymmetricKey(key);
                var bufferMsgDecrypted = CryptographicEngine.Decrypt(symetricKey, bufferMsg, null);
                return bufferMsgDecrypted;
            }public static IAsyncOperation<IBuffer> ReadBufferAsync(IStorageFile file)
                var buffer = FileIO.ReadBufferAsync(file);
                Task<IBuffer> result = Task.Run<IBuffer>(async () =>
                    var Buffer = await buffer;
                    return Decrypt(Buffer);
                return result.AsAsyncOperation();
    Link demo code :
    https://drive.google.com/file/d/0B_cS3IYO936_akE0cmI4bExJMjh0RU9qR3RvWDBWWElZWC1z/view?usp=sharing

    Please provide a working app so this can be tested. You can upload to OneDrive and share a link here.
    Matt Small - Microsoft Escalation Engineer - Forum Moderator
    If my reply answers your question, please mark this post as answered.
    NOTE: If I ask for code, please provide something that I can drop directly into a project and run (including XAML), or an actual application project. I'm trying to help a lot of people, so I don't have time to figure out weird snippets with undefined
    objects and unknown namespaces.

  • Acrobat Reader 9 with an Acrobat 5-8 Created Form?

    Hello,
    I am new to my job working with a ASP application that provides a PDF form (created in Acrobat 5) to users with free form fields the user can fill out. Users have been complaining that when the users upgrade to Acrobat Reader 9 the begin to see a message that "Some features are no longer supported..." and the ability to save a local copy of the file is no longer available. I have 2 questions.
    Was the ability to save a local copy of a form with user-entered data ever a feature of Acrobat Reader?
    Is that an issue that can be corrected by creating the form with the Acrobat 9 Life-Cycle engine or is that a feature that has forever been removed from Acrobat Reader for security or other reasons?
    Thanks in advance!
    - Yohancef

    No I was referring to filled in forms.
    So to be clear, the ability to save user filled in information in a form to your local machine as a PDF through Acrobat Reader IS possible, but has to be enabled by opening the form in Acrobat (9 for example) and enabling some feature to allows Reader users to save the information to the PDF file. This feature is limited though and has a 500 use limit per form.
    Does that sound correct? Do you know the name of the feature?
    Thanks!
    Yohancef Chin
    Date: Mon, 2 Nov 2009 16:49:54 -0700
    From: [email protected]
    To: [email protected]
    Subject: Acrobat Reader 9 with an Acrobat 5-8 Created Form?
    There may be some features that are not supported any more but you may not be using them. That message is just a general warning.
    As far as saving the filled in form, Reader on its own has never been able to do it. You would need the newer version of Acrobat to open and enable these forms so that Reader users can save them. Be warned that this feature is meant for small-time use and has a usage limit of 500 submissions per form. You'll want to read and understand the EULA before planning on using it.
    Or do you mean that they cannot save the unfilled form either?
    >

  • Opening pdf within app using Reader X with Acrobat 5.0 installed

    I have Acrobat 5.0 (full version) installed and also Reader X.  Acrobat 5 was installed 1st and has been my default.  But, I loaded Reader X because Acrobat 5 can't open many newer PDF's.  I have gone into Explorer and set Reader X as the "open with" application and double-clicking a PDF in Explorer results in Reader X opening the file.  The problem is within various applications which have menu links to PDF's.  Many of these still attempt to open the PDF with Acrobat 5.  I believe this may be more a Windows XP problem, but thought maybe someone might know what registry item I should look for to correct this problem since it is related to Acrobat.

    Adobe doesn't recommend the use of Reader X with anything but Acrobat X. Earlier versions of Acrobat and Reader (any version) will cause problems. Remove Acrobat, re-install Reader X and if needed upgrade to Acrobat X or Remove Reader X and re-install ACrobat 5.

  • Play sound in specified duration and desired file when reading data with VISA functions ?

    Hi.
    I read data with VISA read function and then I use functions like multiple for showing data on Chart. every thing is OK.
    now I want play a specified sound (like a 3s song in my desired folder) when signal amplitude cross over from specified value. then after desired time, it will be wait for new cross over from specified value and this procedure repeat again until I stop the program. 
    Data comes from MCU and it doesn't stop when system play sound and when system play sound, incoming data for this part (play sound) will be ignore them until specified expired.
    I use this VI for reading data and mentioned part which is sound part is empty and I don't know what I must done ?
    altougth I use another while loop for sound apart because I want save CPU time.
    Thanks.
    Solved!
    Go to Solution.

    I would recommend making the data type of the notifier a cluster that contains a path and a numeric (double).  The path tells the player which file to play.  The numeric is the duration.
    Now for a slightly complicated, but really neat, way to stop your second loop.  Do not use a second notifier.  Instead, send the normal notification but use Not A Path for the path in the cluster.  Your second loop can do a check for the Not A Path and stop when that is received.
    There are only two ways to tell somebody thanks: Kudos and Marked Solutions
    Unofficial Forum Rules and Guidelines

  • Writing/reading HTTP with plain sockets

    Hello,
    I'm trying to send an HTTP request and read the response with a plain socket, not URLConnection.
    My code below is not working - bufferedReader.readLine() returns null.
    If I change to URLConnection everything works as expected.
    What is wrong in my code?
    Thanks in advance.
    public static void main(String[] args) throws IOException {
            /* -- THIS CODE WORKS --
            URL url = new URL("http://localhost:8080");
            URLConnection urlConnection = url.openConnection();
            OutputStream outputStream = urlConnection.getOutputStream();
            InputStream inputStream = urlConnection.getInputStream();
            // Tomcat is running on localhost port 8080
            Socket socket = SocketFactory.getDefault().createSocket("localhost", 8080);
            OutputStream outputStream = socket.getOutputStream();
            InputStream inputStream = socket.getInputStream();
            // send request
            OutputStreamWriter writer = new OutputStreamWriter(outputStream);
            writer.write("GET /");
            writer.flush();
            // read response
            BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream));
            StringBuilder strBuilder = new StringBuilder();
            String str;
            while ((str = bufferedReader.readLine()) != null) {
                strBuilder.append(str);
            // print response
            System.out.println(strBuilder);
            socket.close();
        }

    Your HTTP request-line(s) syntax is quite wrong.
    Learn them at w3.org site.

Maybe you are looking for