Efficient algorithm

Give me an efficient algorithm to find the points on the arc.

import java.awt.*;
import java.awt.event.*;
import java.awt.geom.*;
import java.text.NumberFormat;
import javax.swing.*;
import javax.swing.event.MouseInputAdapter;

public class ArcPoints extends JPanel {
    Arc2D arc;
    Line2D radial;
    Point2D curveLoc;
    boolean showConstruction;
    JLabel label;
    NumberFormat nf;

    public ArcPoints() {
        showConstruction = false;
        nf = NumberFormat.getInstance();
        nf.setMaximumFractionDigits(1);
        Pointer pointer = new Pointer(this);
        addMouseListener(pointer);
        addMouseMotionListener(pointer);
    }

    protected void paintComponent(Graphics g) {
        super.paintComponent(g);
        Graphics2D g2 = (Graphics2D)g;
        g2.setRenderingHint(RenderingHints.KEY_ANTIALIASING,
                            RenderingHints.VALUE_ANTIALIAS_ON);
        if(arc == null)
            initArc();
        g2.draw(arc);
        g2.setPaint(Color.blue);
        g2.draw(radial);
        drawPoint(g2, Color.red, curveLoc.getX(), curveLoc.getY());
        if(showConstruction)
            showConstructionMarkers(g2);
    }

    protected void setRadial(Point p, double theta) {
        double cx = arc.getCenterX();
        double cy = arc.getCenterY();
        double x = cx + (arc.getWidth()/2)  * Math.cos(theta);
        double y = cy + (arc.getHeight()/2) * Math.sin(theta);
        radial.setLine(cx, cy, x, y);
        curveLoc.setLocation(x, y);
        label.setText("x = " + nf.format(x) + "    y = " + nf.format(y));
        repaint();
    }

    private void initArc() {
        double w = getWidth();
        double h = getHeight();
        double x = w/6;
        double y = h/8;
        double width  = w*2/3;
        double height = h*5/8;
        double start  = 0.0;
        double extent = 300.0;
        int type = Arc2D.OPEN;
        arc = new Arc2D.Double(x, y, width, height, start, extent, type);
        radial = new Line2D.Double();
        curveLoc = new Point2D.Double();
    }

    private void showConstructionMarkers(Graphics2D g2) {
        g2.setPaint(Color.orange);
        Rectangle2D r = arc.getFrame();
        g2.draw(r);
        g2.setPaint(Color.green.darker());
        double theta = Math.toRadians(arc.getAngleStart() - arc.getAngleExtent());
        g2.draw(getRadialMarker(theta));
        theta = Math.toRadians(arc.getAngleStart());
        g2.draw(getRadialMarker(theta));
    }

    private void drawPoint(Graphics2D g2, Color color, double x, double y) {
        g2.setPaint(color);
        g2.fill(new Ellipse2D.Double(x-2, y-2, 4, 4));
    }

    private Line2D getRadialMarker(double theta) {
        Rectangle2D r = arc.getFrame();
        double x = r.getCenterX() + (r.getWidth()/2) * Math.cos(theta);
        double y = r.getCenterY() + (r.getHeight()/2) * Math.sin(theta);
        return new Line2D.Double(r.getCenterX(), r.getCenterY(), x, y);
    }

    private JLabel getLabel() {
        label = new JLabel("curve x and y", JLabel.CENTER);
        Dimension d = label.getPreferredSize();
        d.height = 25;
        label.setPreferredSize(d);
        return label;
    }

    public static void main(String[] args) {
        ArcPoints arcPoints = new ArcPoints();
        JFrame f = new JFrame();
        f.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        f.add(arcPoints);
        f.add(arcPoints.getLabel(), BorderLayout.SOUTH);
        f.setSize(400,400);
        f.setLocation(200,200);
        f.setVisible(true);
    }
}

class Pointer extends MouseInputAdapter {
    ArcPoints ap;

    public Pointer(ArcPoints ap) {
        this.ap = ap;
    }

    public void mousePressed(MouseEvent e) {
        ap.showConstruction = !ap.showConstruction;
        ap.repaint();
    }

    public void mouseMoved(MouseEvent e) {
        Point p = e.getPoint();
        // if point is within arc show radial from frame center to arc
        if(ap.arc.contains(p)) {
            Rectangle2D r = ap.arc.getFrame();
            double dy = p.y - r.getCenterY();
            double dx = p.x - r.getCenterX();
            double theta = Math.atan2(dy, dx);
            double degrees = Math.toDegrees(theta);
            if(degrees < 0.0)
                degrees += 360.0;
            if(containsAngle(degrees))
                ap.setRadial(p, theta);
        }
    }

    /**
     * Arc2D counts angles as increasing counter-clockwise;
     * the Math class and the rest of the java.awt.geom
     * package count angles as increasing clockwise.
     * This method tries to bridge the difference.
     */
    private boolean containsAngle(double degrees) {
        Arc2D arc = ap.arc;
        double start, end;
        double extent = arc.getAngleExtent();
        if(extent > 0.0) {
            start = -arc.getAngleExtent();
            end = start + arc.getAngleExtent();
            degrees -= 360.0;
        } else {
            start = arc.getAngleStart();
            end = start - arc.getAngleExtent();
        }
        return start <= degrees && degrees <= end;
    }
}
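
The "efficient algorithm" the question asks for is already the heart of setRadial(): the point on the arc at angle theta is simply (cx + (w/2)*cos(theta), cy + (h/2)*sin(theta)), constant work per point with no containment or intersection tests. As a minimal standalone sketch (the class name and sample count are mine, not from the original post), the same formula enumerates any number of points along an Arc2D:

import java.awt.geom.Arc2D;
import java.awt.geom.Point2D;

public class ArcSampler {

    // Returns n points spaced evenly in angle along the arc, using the
    // same parametric form as setRadial() above:
    //   x = cx + (w/2)*cos(theta),  y = cy + (h/2)*sin(theta)
    // Arc2D angles grow counter-clockwise while screen y grows downward,
    // hence the sign flip when converting to radians.
    public static Point2D[] sample(Arc2D arc, int n) {
        Point2D[] pts = new Point2D[n];
        double cx = arc.getCenterX();
        double cy = arc.getCenterY();
        double rx = arc.getWidth()  / 2;
        double ry = arc.getHeight() / 2;
        for (int i = 0; i < n; i++) {
            double t = (n > 1) ? (double) i / (n - 1) : 0.0;
            double deg = arc.getAngleStart() + t * arc.getAngleExtent();
            double theta = Math.toRadians(-deg);       // flip to screen orientation
            pts[i] = new Point2D.Double(cx + rx * Math.cos(theta),
                                        cy + ry * Math.sin(theta));
        }
        return pts;
    }
}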

Similar Messages

  • Is there any toolbox for acoustics with the Maximum Length Sequence (MLS) algorithm?

    The MLS is an efficient algorithm for measuring the impulse response using pseudorandom noise. So I think it would be interesting for this function to be available in LabVIEW or in a LabVIEW toolbox.

    Acoustics,
    There is currently no such toolbox but you can submit this suggestion in the product feedback form at this link.
    https://sine.ni.com/apps/we/nicc.call_me?p_action=country&p_lang_id=US
    Cyril
    Cyril Bouton
    Active LabVIEW Developer

  • Efficiency of Java String operations

    Hi,
    for an Information Retrieval project, I need to make extensive use of String operations on a vast number of documents, and in particular involving lots of substring() operations.
    I'd like to get a feeling for how efficiently the substring() method of java.lang.String is implemented, just to understand whether trying to optimize it would be a reasonable option. (I was thinking of an algorithm for efficient string pattern matching such as Knuth-Morris-Pratt, but if java.lang.String already applies similarly efficient algorithms I won't bother.)
    Can someone help?
    J

    Thanks for your comment. Yes, of course you're right, I
    meant indexOf(). If so (thanks DrClap), let me enter the discussion.
    indexOf() implements a so-called "brute force" algorithm.
    The performance is O(n*m), where n is the length of the text and
    m is the length of the pattern, but it is close to n on average.
    KMP is O(n), so the performance gain would be hardly noticeable.
    To get a real performance gain you should look at the BM (Boyer-Moore,
    O(n/m)) algorithm or one of its descendants.
    As for the java.util.regex package, as far as I understand it should be
    several times slower than indexOf(), because it reads EACH character through an interface method (as opposed to direct array access in indexOf()).
    Though that's still to be proved experimentally.
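    For the curious, here is a rough sketch of the Horspool simplification of Boyer-Moore mentioned above (my own illustration, not library code; the shift table assumes Java's 16-bit char range):
    public class Horspool {

        // Bad-character table: each entry says how far the search window may
        // slide when the character aligned with its last position mismatches.
        public static int indexOf(String text, String pattern) {
            int n = text.length(), m = pattern.length();
            if (m == 0) return 0;
            int[] shift = new int[Character.MAX_VALUE + 1];
            java.util.Arrays.fill(shift, m);
            for (int i = 0; i < m - 1; i++)
                shift[pattern.charAt(i)] = m - 1 - i;
            int pos = 0;
            while (pos <= n - m) {
                int j = m - 1;
                while (j >= 0 && text.charAt(pos + j) == pattern.charAt(j))
                    j--;
                if (j < 0) return pos;                     // full match at pos
                pos += shift[text.charAt(pos + m - 1)];    // slide the window
            }
            return -1;                                     // not found
        }

        public static void main(String[] args) {
            System.out.println(indexOf("the quick brown fox", "brown"));  // prints 10
        }
    }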

  • Distance Algorithm. Please help!

    I need to find an algorithm for calculating the closest point to each point contained in a given array. That is, for each point in an array of randomly generated points, I need to find the point closest to it other than itself. I already have a basic algorithm to solve this, but it compares the distances between the current point and every other point in the array, making it very inefficient (O(n^2)). I am looking for a more efficient algorithm. Can anyone point me in the right direction? No code needed, I can handle that myself. Thanks.
    Edited by: 799505 on Mar 3, 2011 4:02 PM

    837443 wrote:
    What about sorting the array. I think the method is Arrays.sort() in the java util. Then with a sorted array you only have to check the point before and the point after. That way you only have to check 2 points instead of the whole array.
    Or, what about using a tree instead of an array. Trees will sort themselves automatically with each addition. Then you only have to check 2 points (the one before and after).
    Or, another option, you could convert the array to a tree.
    I think that if the array is NOT sorted then you will have to check every single element of the array.
    I hope that helps with your efficiency problem. Take care.

    That works if they're points on a line, but not if they're points in a plane or other higher-than-one-dimensional space. Unless there are rules that say that, for instance, (1, 2) is "closer to" the origin than (2, 1), and those rules can impose a total ordering on the points.
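    One middle ground between the O(n^2) scan and a full k-d tree, sketched below under assumptions of my own (all names included): sort the points by x, then for each point scan outward in x order and stop as soon as the x gap alone exceeds the best squared distance found so far. The worst case is still O(n^2) (e.g. all points sharing one x), but on scattered points most pairs are pruned:
    import java.util.Arrays;

    public class NearestPoints {

        // For each point, the index of its nearest other point.
        // Assumes at least two points.
        static int[] nearest(double[] xs, double[] ys) {
            int n = xs.length;
            Integer[] order = new Integer[n];
            for (int i = 0; i < n; i++) order[i] = i;
            Arrays.sort(order, (a, b) -> Double.compare(xs[a], xs[b]));
            double[] best = new double[n];              // best squared distance so far
            Arrays.fill(best, Double.POSITIVE_INFINITY);
            int[] nn = new int[n];
            for (int a = 0; a < n; a++) {
                int i = order[a];
                for (int b = a + 1; b < n; b++) {       // scan right in x order
                    int j = order[b];
                    double dx = xs[j] - xs[i];
                    if (dx * dx >= best[i]) break;      // x gap alone already too big
                    double d2 = dx * dx + (ys[j] - ys[i]) * (ys[j] - ys[i]);
                    if (d2 < best[i]) { best[i] = d2; nn[i] = j; }
                }
                for (int b = a - 1; b >= 0; b--) {      // scan left in x order
                    int j = order[b];
                    double dx = xs[i] - xs[j];
                    if (dx * dx >= best[i]) break;
                    double d2 = dx * dx + (ys[j] - ys[i]) * (ys[j] - ys[i]);
                    if (d2 < best[i]) { best[i] = d2; nn[i] = j; }
                }
            }
            return nn;
        }
    }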

  • Any good algorithm to balanced a strict binary tree for a ordered set

    Hi,
    I have created a syntax tree for a binary operator that is associative but not commutative. The result is an unbalanced strict binary tree.
    I have tried using an AVL tree as well as a heap, but they occasionally destroy the non-commutative property of the syntax tree.
    Is there an efficient algorithm that I can use to balance this tree?
    Thank you.

    Is linear time good enough?
    1. Traverse your tree to build a list.
    2. Recursively turn the list into a tree. Pseudocode for this step:
       fun listToTree( [a] ) = Leaf(a)
         | listToTree( xs ) = Node( listToTree(firstHalf(xs)), listToTree(secondHalf(xs)) );
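    Transcribed to Java, assuming a minimal Node class of my own (an illustration of the pseudocode, not a library API). Operand order is untouched; only the grouping changes, which associativity permits:
    class Node {
        String value;                                  // operand at a leaf
        Node left, right;
        Node(String v) { value = v; }                  // leaf
        Node(Node l, Node r) { left = l; right = r; }  // inner node
    }

    class Rebalance {
        // Step 1: flatten the strict binary tree (every inner node has two
        // children) to its leaf list, left to right, preserving operand order.
        static void flatten(Node t, java.util.List<Node> out) {
            if (t.left == null) { out.add(t); return; }
            flatten(t.left, out);
            flatten(t.right, out);
        }

        // Step 2: split the list in half recursively; the result has height
        // ceil(log2 n) and is still a strict binary tree.
        static Node build(java.util.List<Node> xs) {
            if (xs.size() == 1) return xs.get(0);
            int mid = xs.size() / 2;
            return new Node(build(xs.subList(0, mid)), build(xs.subList(mid, xs.size())));
        }

        static Node rebalance(Node root) {
            java.util.List<Node> leaves = new java.util.ArrayList<>();
            flatten(root, leaves);
            return build(leaves);
        }
    }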

  • Sine-fit algorithm

    Hi,
    I would like to fit my data (about 200 kpoints) with a sine wave (with 4 parameters: A.sin(2pi.B.t + C) + D) in LabVIEW. I'm looking for a VI with a convergent algorithm (such as a least-squares solver) which returns the 4 parameters plus the RMS error between measured data and model data. Or, why not, a more efficient algorithm...
    I had a look in the "Optimisation" function set and also in the "Ajustement" function set but I didn't find the right VI. Can someone advise me?
    Regards,
    Benjamin

    You can use the Levenberg-Marquardt fit, but usually the extract single tone information VI works well enough.
    There is a paper from the '99 NIWeek about the performance of this VI; I attached it because I couldn't find the link.
    Depending on your signal, it might be wise to process blocks of complete periods and average the results...
    I attached a VI that uses this single tone VI to monitor the line frequency. Just abuse a headphone as a coil receiver, plug it into the mic input of your soundcard, and place the headphone near a transformer...
    Greetings from Germany
    Henrik
    LV since v3.1
    “ground” is a convenient fantasy
    '˙˙˙˙uıɐƃɐ lɐıp puɐ °06 ǝuoɥd ɹnoʎ uɹnʇ ǝsɐǝld 'ʎɹɐuıƃɐɯı sı pǝlɐıp ǝʌɐɥ noʎ ɹǝqɯnu ǝɥʇ'
    Attachments:
    line freq with soundcard v2.vi ‏35 KB
    FFT tone detection Moriat NIWeek99.ppt ‏1159 KB
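    Outside LabVIEW, the fit is easy to sketch once the frequency B is known (say, from the single tone VI or an FFT peak): A.sin(2pi.B.t + C) + D equals a*sin(2pi.B.t) + b*cos(2pi.B.t) + D with a = A*cos(C) and b = A*sin(C), which is linear in (a, b, D), so plain least squares gives a closed form. A hedged Java sketch of this three-parameter step (my own illustration, not the NI VI):
    public class SineFit {

        // Three-parameter sine fit with known frequency: returns {A, B, C, D}
        // for the model A*sin(2*pi*B*t + C) + D.
        public static double[] fit(double[] t, double[] y, double freq) {
            double[][] m = new double[3][3];       // normal equations M * p = v
            double[] v = new double[3];
            for (int i = 0; i < t.length; i++) {
                double s = Math.sin(2 * Math.PI * freq * t[i]);
                double c = Math.cos(2 * Math.PI * freq * t[i]);
                double[] row = {s, c, 1.0};
                for (int j = 0; j < 3; j++) {
                    v[j] += row[j] * y[i];
                    for (int k = 0; k < 3; k++) m[j][k] += row[j] * row[k];
                }
            }
            double[] p = solve3(m, v);                 // p = {a, b, D}
            double amp = Math.hypot(p[0], p[1]);       // A
            double phase = Math.atan2(p[1], p[0]);     // C
            return new double[] {amp, freq, phase, p[2]};
        }

        // Cramer's rule is acceptable for a fixed 3x3 system.
        private static double[] solve3(double[][] m, double[] v) {
            double det = det3(m);
            double[] p = new double[3];
            for (int c = 0; c < 3; c++) {
                double[][] mc = {m[0].clone(), m[1].clone(), m[2].clone()};
                for (int r = 0; r < 3; r++) mc[r][c] = v[r];
                p[c] = det3(mc) / det;
            }
            return p;
        }

        private static double det3(double[][] a) {
            return a[0][0] * (a[1][1] * a[2][2] - a[1][2] * a[2][1])
                 - a[0][1] * (a[1][0] * a[2][2] - a[1][2] * a[2][0])
                 + a[0][2] * (a[1][0] * a[2][1] - a[1][1] * a[2][0]);
        }
    }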

  • How to maintain pixel size when exporting?

    When exporting an edited image (ex: 20 MB) from Aperture to a desktop folder, I choose file>export>versions>jpeg original.  When I open the exported image in the desktop folder (it opens in preview), the Tools/Inspector window says that the image is now only 2MB.  This has not always happened.  Can someone tell me what I might be doing or not doing that is causing this, or what settings I need to change?
    Thank you.

    golindy,
    JPEG is a lossy format that relies on two ways of reducing the file size. The first is the lossy part, which throws away data that is perceived to be unimportant for human vision; the resulting data is then further compressed losslessly using the same type of compression zip uses (this is why zipping a JPEG doesn't lead to further size reduction).
    The more you throw away in the first part, the more likely the image is to suffer from 'compression artefacts'. The number you select on export determines how much data gets thrown away.
    I don't know what '12' officially means for Apple, but from eyeballing files, it seems like it throws away pretty close to zero. So a file output with 12 is top quality.
    Aperture has always worked the same way here, it hasn't changed during any update.
    However, some things that may affect it are:
    1) The efficiency of both compression algorithms seems to improve over time.
    So a year ago, a 20-megapixel image might compress down to a 5MB file using a quality setting of 10. Today that very same file may compress down to a 4MB file at quality 10. But it's important to note, the quality of the file will be the same, quality 10. All that has happened is the more efficient algorithm has allowed an extra 1MB of file space saving.
    2) The raw converters periodically get improved with new rendering. One area that has continually improved in Aperture is noise suppression and more natural sharpening.
    These impact the first part of the compression process. A noisy image with harsh sharpening will compress less than a clean image with more subtle sharpening. So better raw processing can also impact the exported size. Note, this one works in both directions: an improved conversion that resolves more detail will output a larger file, whereas an improved conversion that gives less noise will output a smaller file.
    So in your case, I think what was happening was you were not losing quality, just gaining file size efficiency, and now that you have changed to quality 12, you are using that saved file space to output higher quality files.
    Andy

  • Graph List Coloring

    Hi
    please can someone help me with this algorithm.
    The problem is the classic graph coloring with the condition: two adjacent nodes do not have the same color and each vertex has a list of admitted colors.
    thanks a lot

    Hi
    please can someone help me with this algorithm.
    The problem is the classic graph coloring with the
    condition: two adjacent nodes do not have the same
    color and each vertex has a list of admitted colors.
    thanks a lot

    This is related to the map colouring problem. If you draw
    a link between the capital cities of each pair of countries
    with a common border, you get a graph. Conversely, the graph
    gives rise to a map.
    Therefore, the literature on map colouring could be helpful.
    The fact that each vertex has a list of admissible colours is
    a complication that could prevent any solution from existing.
    For example, if two adjacent vertices have only one admissible colour,
    the same for each, then it can't be done. The famous four
    colour theorem, first proved by Appel and Haken in the 1970s
    (using a computer intensive proof) shows that a colouring is
    always possible if all vertices have the same list of four
    colours, but it was known long before that if there are only
    three colours, there are maps that can't be coloured.
    Obviously, an exhaustive search would work in theory, but it
    would be at least an exponential-time algorithm.
    Start with one colour for any vertex and choose a different
    colour for each adjacent vertex. Then do the same for each of
    the newly coloured vertices, skipping those that have already
    been coloured. Keep going until a vertex can't be coloured or
    the whole graph has been coloured.
    Each time you get stuck, backtrack and change a colour until
    you've tried every colour on each list, or you find a colouring.
    Hopefully there's a more efficient algorithm in the literature.
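    The backtracking search just described, with the per-vertex lists of admitted colours folded in, fits in a few lines. A sketch in Java (the representation and all names are mine):
    import java.util.Arrays;

    public class ListColoring {

        // Colour vertices 0..n-1 in order, trying only each vertex's own
        // admitted colours and undoing a choice when the search gets stuck.
        // Exponential in the worst case, as noted above.
        static boolean color(int v, int[][] adj, int[][] allowed, int[] colour) {
            if (v == colour.length) return true;      // all vertices coloured
            for (int c : allowed[v]) {
                boolean ok = true;
                for (int u : adj[v])                  // adjacency list of v
                    if (colour[u] == c) { ok = false; break; }
                if (ok) {
                    colour[v] = c;
                    if (color(v + 1, adj, allowed, colour)) return true;
                    colour[v] = -1;                   // backtrack
                }
            }
            return false;
        }

        public static void main(String[] args) {
            // Triangle where vertex 2 admits only colour 0.
            int[][] adj     = {{1, 2}, {0, 2}, {0, 1}};
            int[][] allowed = {{0, 1}, {1, 2}, {0}};
            int[] colour = new int[3];
            Arrays.fill(colour, -1);                  // -1 means uncoloured
            System.out.println(color(0, adj, allowed, colour) + " " + Arrays.toString(colour));
        }
    }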

  • Different Cost values for full table scans

    I have a very simple query that I run in two environments (Prod (20 CPU) and Dev (12 CPU)). Both environments are HP-UX, Oracle 9i.
    The query looks like this:
    SELECT prd70.jde_item_n
    FROM gdw.vjda_gdwprd68_bom_cmpnt prd68
    ,gdw.vjda_gdwprd70_gallo_item prd70
    WHERE prd70.jde_item_n = prd68.parnt_jde_item_n
    AND prd68.last_eff_t+nvl(to_number(prd70.auto_hld_dy_n),0)>= trunc(sysdate)
    GROUP BY prd70.jde_item_n
    When I look at the explain plans, there is a significant difference in cost and I can't figure out why they would be different. Both queries do full table scans, both instances have about the same number of rows, statistics on both are fresh.
    Production Plan:
    0 SELECT STATEMENT Optimizer=ALL_ROWS (Cost=18398 Card=14657 Bytes=249169)
    1 0 SORT (GROUP BY) (Cost=18398 Card=14657 Bytes=249169)
    2 1 HASH JOIN (Cost=18304 Card=14657 Bytes=249169)
    3 2 TABLE ACCESS (FULL) OF 'GDWPRD70_GALLO_ITEM' (Cost=9494 Card=194733 Bytes=1168398)
    4 2 TABLE ACCESS (FULL) OF 'GDWPRD68_BOM_CMPNT' (Cost=5887 Card=293149 Bytes=3224639)
    Development plan:
    0 SELECT STATEMENT Optimizer=ALL_ROWS (Cost=3566 Card=14754 Bytes=259214)
    1 0 HASH GROUP BY (GROUP BY) (Cost=3566 Card=14754 Bytes=259214)
    2 1 HASH JOIN (Cost=3558 Card=14754 Bytes=259214)
    3 2 TABLE ACCESS (FULL) OF 'GDWPRD70_GALLO_ITEM' (Cost=19144 Card=193655 Bytes=1323598)
    4 2 TABLE ACCESS (FULL) OF 'GDWPRD68_BOM_CMPNT' (Cost=1076 Card=295075 Bytes=3169542)
    There seems to be no reason for the costs to be so different, but I'm hoping that someone will be able to lead me in the right direction.
    Thanks,
    Jdelao

    This link may help:
    http://jaffardba.blogspot.com/2007/07/change-behavior-of-group-by-clause-in.html
    But looking at the explain plans, one of them uses a SORT (GROUP BY) (the higher-cost query) and the other uses a HASH GROUP BY (the lower-cost query). From my searches on the 'Net, HASH GROUP BY is a more efficient algorithm than SORT (GROUP BY), which would lead me to believe that this is one of the reasons why the cost values are so different. I can't find which version HASH GROUP BY came in, but quick searches indicate 10g.
    Are your optimizer feature parameters set to the same values? In general you could compare the relevant parameters to see if there is a difference.
    Hope this helps!

  • SQL question that's always bugged  me

    SQL> select * from customer where customer_id = 4;
    no rows selected
    SQL> select max(customer_id) from customer where customer_id = 4;
    MAX(CUSTOMER_ID)
    SQL>
    WHY!!!!!
    This has been driving me nuts. For the past hour or so I was trying to hunt down a bug in some code; it turned out that, due to the max(), a function wasn't raising a NO_DATA_FOUND exception and was failing elsewhere with an obscure error because of it. After a "select max() into" clause, we have to add an additional check to see whether the value it selected into is null or not... how dumb is that?
    ok, I've calmed down now.

    Michael:
    Both seem quite reasonable to me. Conceptually, ignoring the GROUP BY bit, a COUNT works like:
    DECLARE
       l_cnt NUMBER := 0;
    BEGIN
       FOR r IN (<query>) LOOP
          l_cnt := l_cnt + 1;
       END LOOP;
       RETURN l_cnt;
    END;
    While a MAX or MIN works like:
    DECLARE
       l_max VARCHAR2(4000) := NULL;
    BEGIN
       FOR r IN (<query>) LOOP
          IF l_max IS NULL OR r.max_col > l_max THEN
             l_max := r.max_col;
          END IF;
       END LOOP;
       RETURN l_max;
    END;
    I'm sure that Oracle has more efficient algorithms, but the point is that the aggregate functions iterate over a result set to generate the result. The fact that the result set is empty in some cases doesn't really matter.
    Think about what it would mean if some aggregates, like COUNT or SUM, never threw NO_DATA_FOUND, but others like MAX OR MIN did if there were no rows matching the criteria.
    In both of your example cases, grouping does take place:
    SQL> SELECT COUNT(*) FROM employees;
      COUNT(*)
           107
    Execution Plan
       0      SELECT STATEMENT Optimizer=CHOOSE (Cost=1 Card=1)
       1    0   SORT (AGGREGATE)
       2    1     INDEX (FULL SCAN) OF 'EMP_EMAIL_UK' (UNIQUE) (Cost=1 Card=107)
    It is just that the group is all of the records.
    John
    Message was edited by:
    John Spencer
    Added the grouping comments.

  • Radix Sort

    I need to lexicographically organize a list of names. I looked over the internet for sorting methods, and from what I could see the most efficient one is Radix Sort. Now, I didn't quite understand how it works and how I can use it. I'm a newbie in Java, so I didn't get most of the code I saw. Could you help me, maybe by giving me a quick example or something on how to do it?
    Thanks in advance...

    There's a tutorial on Collections that you should read here:
    http://java.sun.com/docs/books/tutorial/collections/index.html
    And when you read that something is "the" most efficient algorithm for sorting, don't believe it. It's actually more complicated than that.
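    For illustration, a least-significant-digit radix sort over strings could look like the sketch below (my own, assuming an 8-bit character range for the counting table; shorter names sort as if padded with '\0', which preserves lexicographic order). Whether it actually beats Arrays.sort() depends on the data, as noted above:
    public class RadixSortNames {

        // LSD radix sort: one stable counting sort per character position,
        // from the last position back to the first.
        static void radixSort(String[] a) {
            int maxLen = 0;
            for (String s : a) maxLen = Math.max(maxLen, s.length());
            final int R = 256;                      // assumed alphabet size
            String[] aux = new String[a.length];
            for (int d = maxLen - 1; d >= 0; d--) {
                int[] count = new int[R + 1];
                for (String s : a) count[charAt(s, d) + 1]++;   // frequencies
                for (int r = 0; r < R; r++) count[r + 1] += count[r];  // cumulate
                for (String s : a) aux[count[charAt(s, d)]++] = s;     // distribute
                System.arraycopy(aux, 0, a, 0, a.length);
            }
        }

        // Treat positions past the end of a string as '\0'.
        private static int charAt(String s, int d) {
            return d < s.length() ? s.charAt(d) & 0xFF : 0;
        }

        public static void main(String[] args) {
            String[] names = {"smith", "jones", "ada", "adams", "zoe"};
            radixSort(names);
            System.out.println(java.util.Arrays.toString(names));
        }
    }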

  • Is anyone working with large datasets (>200M) in LabVIEW?

    I am working with external Bioinformatics databases and find the datasets to be quite large (2 files easily come out at 50M or more). Is anyone working with large datasets like these? What is your experience with performance?

    Colby, it all depends on how much memory you have in your system. You could be okay doing all that with 1GB of memory, but you still have to take care to not make copies of your data in your program. That said, I would not be surprised if your code could be written so that it would work on a machine with much less ram by using efficient algorithms. I am not a statistician, but I know that the averages & standard deviations can be calculated using a few bytes (even on arbitrary length data sets). Can't the ANOVA be performed using the standard deviations and means (and other information like the degrees of freedom, etc.)? Potentially, you could calculate all the various bits that are necessary and do the F-test with that information, and not need to ever have the entire data set in memory at one time. The tricky part for your application may be getting the desired data at the necessary times from all those different sources. I am usually working with files on disk where I grab x samples at a time, perform the statistics, dump the samples and get the next set, repeat as necessary. I can calculate the average of an arbitrary length data set easily by only loading one sample at a time from disk (it's still more efficient to work in small batches because the disk I/O overhead builds up).
    Let me use the calculation of the mean as an example (hopefully the notation makes sense): see the jpg. What this means in plain English is that the mean can be calculated solely as a function of the current data point, the previous mean, and the sample number. For instance, given the data set [1 2 3 4 5], sum it, and divide by 5, you get 3. Or take it a point at a time: the average of [1]=1, [2+1*1]/2=1.5, [3+1.5*2]/3=2, [4+2*3]/4=2.5, [5+2.5*4]/5=3. This second method required far more multiplications and divisions, but it only ever required remembering the previous mean and the sample number, in addition to the new data point. Using this technique, I can find the average of gigs of data without ever needing more than three doubles and an int32 in memory. A similar derivation can be done for the variance, but it's easier to look it up (I can provide it if you have trouble finding it). Also, I think this functionality is built into the LabVIEW pt-by-pt statistics functions.
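    In code, the running mean described above, plus Welford's companion recurrence for the variance (my addition, not from the post), could look like:
    public class RunningStats {
        private long n;
        private double mean;   // running mean
        private double m2;     // running sum of squared deviations (Welford)

        public void add(double x) {
            n++;
            double delta = x - mean;
            mean += delta / n;          // mean_n = mean_(n-1) + (x - mean_(n-1)) / n
            m2 += delta * (x - mean);   // uses the mean before and after the update
        }

        public double getMean()     { return mean; }
        public double getVariance() { return n > 1 ? m2 / (n - 1) : 0.0; }

        public static void main(String[] args) {
            RunningStats rs = new RunningStats();
            for (double x : new double[] {1, 2, 3, 4, 5}) rs.add(x);
            System.out.println(rs.getMean());   // 3.0, matching the example above
        }
    }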
    I think you can probably get the data you need from those db's through some carefully crafted queries, but it's hard to say more without knowing a lot more about your application.
    Hope this helps!
    Chris
    Attachments:
    Mean Derivation.JPG ‏20 KB

  • Java Linear, Quicksort, Binary Time Calc Problems

    Objective of program: Simple program demonstrating the time it takes to do each algorithm.
    My approach:
    1) Prompt a number
    2) Make array of linear ints
    3) Calculate time it takes for a linear search
    4) Repeat for quick sort, and Binary search
    Problems:
    1) Linear search time is always 0
    2) Quick sort never ends.
    3) Don't know if binary works yet since it never goes beyond quicksort.
    Any help or suggestions are greatly appreciated.
    import java.util.Date;
    import java.util.ArrayList;
    import java.util.Random;
    import java.util.Arrays;
    import java.util.Scanner;

    public class Driver {
        public static void main(String[] args) {
            Random generator = new Random();
            int[] linear = new int[1000000];                  // Create linear array
            for (int i = 0; i < linear.length; i++)           // Fill the array
                linear[i] = i;
            Scanner input = new Scanner(System.in);
            System.out.print("Please enter number to search: ");  // Asks
            int search = input.nextInt();                     // Stores number
            Date end = new Date();                            // Create timer
            long startTime1 = end.getTime();                  // Start time
            for (int i = 0; i < linear.length; i++) {         // Linear search
                if (linear[i] == search) {
                    long endTime1 = end.getTime();            // If found, end time
                    System.out.println("Time of Linear search: " + (endTime1 - startTime1));  // Prints elapsed time
                    break;
                }
            }
            int[] quicksort = new int[1000000];               // Creates quicksort array
            for (int i = 0; i < quicksort.length; i++)        // Initializes the array
                quicksort[i] = generator.nextInt(100000);
            long startTime2 = end.getTime();                  // Starts the time
            for (int i = 0; i < 1000000; i++)                 // Sorts...
                Arrays.sort(quicksort);
            long endTime2 = end.getTime();                    // Ends time
            System.out.println("Time of QuickSort: " + (startTime2 - endTime2));  // Prints elapsed time
            int[] binary = new int[1000000];                  // Creates binary array
            for (int i = 0; i < binary.length; i++)           // Initializes binary array
                binary[i] = generator.nextInt();
            long startTime3 = end.getTime();                  // Start time
            Arrays.binarySearch(binary, search);              // Binary search
            long endTime3 = end.getTime();                    // Ends time
            System.out.println("Time of Binary Search: " + (endTime3 - startTime3));  // Prints out time
        }
    }
    Edited by: onguy3n on Mar 26, 2009 4:39 AM

    ibanezplayer85 wrote:
    Any help or suggestions are greatly appreciated.
    Suggestion: Break your code up into different methods and even separate classes, if necessary; don't just use main for everything. Maybe you posted it this way to have it all in one class for the forum to read, but it's very confusing to look at it and understand it this way. I know that this isn't an answer to your question, but you did ask for suggestions :)

    Thanks, it was just a demonstration program in class so he didn't really care about readability, but yes, I should have separated it into a different class.

    > Linear search time is always 0
    I'm not sure what the convention is, but whenever I needed to measure time for an algorithm, I used System.currentTimeMillis(); rather than the Date class.
    e.g.
    long startTime = System.currentTimeMillis();
    long endTime = System.currentTimeMillis();
    long totalTime = endTime - startTime;
    Although, I think if you're not printing anything out to the console, it will most likely print out 0 as the time (because most of the processing is working on printing out the data). That is, unless you're doing some heavy processing.

    Thanks! I tried System.currentTimeMillis() and it now works. I still don't understand your explanation of why it prints out 0, though :(

    > Quick sort never ends.
    I think it's just taking a while. It's not an efficient algorithm, with a worst-case time complexity of O(n^2), and you gave it a very large array of random values to work with. I wouldn't be surprised if you ran out of heap space before it finished. If you knock some zeros off the array size, you'll see that it does, in fact, finish.

    Ok, thanks! In class we didn't talk much about the heap. How do I calculate how much heap space my program will use, and how do I allocate more? I vaguely remember something like xmx512m or something as the parameter, but every time I get an error:
    Unrecognized option: -xmx512m
    Could not create the Java virtual machine.
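    One thing the thread never states outright: the posted loop calls Arrays.sort() on the full array 1,000,000 times, which by itself explains why the quicksort step seems to never end. A reworked harness along the lines of the advice above (sort once, time with System.nanoTime(), and only binary-search a sorted array) might look like the sketch below; note also that the JVM flag is case-sensitive, -Xmx512m, which is why -xmx512m was rejected:
    import java.util.Arrays;
    import java.util.Random;

    public class SearchTimer {
        public static void main(String[] args) {
            int[] data = new Random().ints(1_000_000, 0, 1_000_000).toArray();
            int target = 42;

            long t0 = System.nanoTime();
            for (int v : data) if (v == target) break;      // linear search
            System.out.println("linear: " + (System.nanoTime() - t0) / 1e6 + " ms");

            t0 = System.nanoTime();
            Arrays.sort(data);                              // one sort, not 10^6
            System.out.println("sort:   " + (System.nanoTime() - t0) / 1e6 + " ms");

            t0 = System.nanoTime();
            Arrays.binarySearch(data, target);              // needs sorted input
            System.out.println("binary: " + (System.nanoTime() - t0) / 1e6 + " ms");
        }
    }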

  • If processing 30000 by 700000 array how long labview will take?

    I was just wondering if I can use LabVIEW or not for the following, as the deadline is Tuesday.
    I have 700000 locations' latitude/longitude, and I have 30000 other locations' latitude/longitude.
    Now I will take locations one by one from the 30000 and want to find all the locations out of the 700000 which are within a 20-mile radius of my location (which is one of the 30000).
    I don't want to compare the distance between one point and all the other 700000. Instead, can we do this comparison for fewer values? I mean, with just one look at all 700000, can the set be reduced without comparing each and every value? Is that possible?
    If not, then:
    I have made a distance calculator using a formula node and it works with 100000 (I have tested it). Now if I make it calculate for 700000, and that 30000 times, how long will it take? Anybody have any idea? Time is the restriction here.
    I am using the LabVIEW 8.6 evaluation version.

    janki wrote:
    I was just wondering if I can use LabVIEW or not for the following, as the deadline is Tuesday.
    Of course you can use LabVIEW!
    If the problem fits into the memory constraint of a 32-bit OS, LabVIEW is equally suitable as any other programming language.
    As Joseph already mentioned, deciding on a good internal data structure is important for speed and efficiency. Make sure to keep the large data structures "in place" and avoid constantly making data copies in memory. I assume you have an efficient algorithm to calculate the distance between two points given their long/lat coordinates.
    Why don't you start with a scaled-down program containing small arrays of artificial data and work out the main algorithm. Feel free to post it here so we can discuss the layout and possibly offer improvements. Good luck!
    LabVIEW Champion . Do more with less code and in less time .
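    One concrete version of the "reduce before you compare" idea, sketched in Java for illustration (the constants and names are mine; a real implementation would live in the poster's VI): a cheap bounding-box test in degrees rejects most of the 700000 candidates before the exact haversine distance is computed:
    public class RadiusFilter {
        static final double MILES_PER_DEG_LAT = 69.0;      // rough approximation

        static boolean within20Miles(double lat1, double lon1,
                                     double lat2, double lon2) {
            double dLat = Math.abs(lat1 - lat2) * MILES_PER_DEG_LAT;
            if (dLat > 20) return false;                   // quick reject on latitude
            double milesPerDegLon =
                MILES_PER_DEG_LAT * Math.cos(Math.toRadians(lat1));
            double dLon = Math.abs(lon1 - lon2) * milesPerDegLon;
            if (dLon > 20) return false;                   // quick reject on longitude
            return haversineMiles(lat1, lon1, lat2, lon2) <= 20;  // exact test last
        }

        static double haversineMiles(double lat1, double lon1,
                                     double lat2, double lon2) {
            double R = 3959;                               // earth radius in miles
            double dLat = Math.toRadians(lat2 - lat1);
            double dLon = Math.toRadians(lon2 - lon1);
            double a = Math.sin(dLat / 2) * Math.sin(dLat / 2)
                     + Math.cos(Math.toRadians(lat1)) * Math.cos(Math.toRadians(lat2))
                     * Math.sin(dLon / 2) * Math.sin(dLon / 2);
            return 2 * R * Math.asin(Math.sqrt(a));
        }
    }
    Sorting the 700000 points by latitude and binary-searching the 20-mile latitude band would cut the candidate list further before the box test even runs.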

  • Keyword Search on all tables of a Schema

    Hi,
    My application incorporates a keyword search facility, i.e. it should search all tables for a particular keyword and return the matched strings. This should be implemented with dynamic queries from a procedure.
    Can anyone suggest an efficient algorithm for this?
    Thanks in advance

    A query like the one you wish to build may have nasty side effects in Oracle. I can post a quick suggestion... but I'm aware that it may pollute the SGA somewhat and thus cause some performance issues for the system. Consider it a starting point, ok?
    PROMPT create result table
    CREATE GLOBAL TEMPORARY TABLE keyword_search_result_gt
    (tname varchar2(30)
    ,cname varchar2(30)
    ,text  varchar2(4000)
    ) ON COMMIT PRESERVE ROWS
    PROMPT create search procedure
    CREATE OR REPLACE PROCEDURE wild_keyword_search( p_keyword IN VARCHAR2 )
    IS
    CURSOR c1 IS
    SELECT /*+cache */
      table_name
    ,column_name
    FROM
      user_tab_columns
    WHERE
      data_type = 'VARCHAR2'
    ORDER BY 1;
    BEGIN
    FOR x IN c1 LOOP
      EXECUTE IMMEDIATE 'INSERT INTO keyword_search_result_gt SELECT '''||x.TABLE_NAME||''', '''||x.COLUMN_NAME||''', '||x.COLUMN_NAME||' FROM '||x.TABLE_NAME||' WHERE '||x.COLUMN_NAME||' LIKE '''||'%'||p_keyword||'%'||'''';
    END LOOP;
    END;
    PROMPT cleanup
    DELETE keyword_search_result_gt
    PROMPT run a test searching for keyword 'TEXTABC'
    exec wild_keyword_search ('TEXTABC')
    PROMPT query output
    SELECT * FROM keyword_search_result_gt
    /

    Is this an OK answer? Or do you consider it crap to be swapped with a scalable solution?
