mirror of
https://github.com/postgres/postgres.git
synced 2025-06-30 21:42:05 +03:00
Applied patches from Kris Jurka fixing a string-tokenizing problem and
fixing an ORDER BY problem for index metadata results. Also removes some unused code and fixes the toString method on Statement. Modified Files: jdbc/org/postgresql/jdbc1/AbstractJdbc1DatabaseMetaData.java jdbc/org/postgresql/jdbc1/AbstractJdbc1Statement.java
This commit is contained in:
@ -3143,26 +3143,23 @@ public abstract class AbstractJdbc1DatabaseMetaData
|
||||
//<unnamed>\000ww\000vv\000UNSPECIFIED\000m\000a\000n\000b\000
|
||||
// we are primarily interested in the column names which are the last items in the string
|
||||
|
||||
StringTokenizer st = new StringTokenizer(targs, "\\000");
|
||||
if (st.hasMoreTokens()) {
|
||||
fkName = st.nextToken();
|
||||
Vector tokens = tokenize(targs, "\\000");
|
||||
if (tokens.size() > 0) {
|
||||
fkName = (String)tokens.elementAt(0);
|
||||
}
|
||||
|
||||
if (fkName.startsWith("<unnamed>")) {
|
||||
fkName = targs;
|
||||
}
|
||||
|
||||
int advance = 4 + (keySequence - 1) * 2;
|
||||
for ( int i = 1; st.hasMoreTokens() && i < advance ; i++ )
|
||||
st.nextToken(); // advance to the key column of interest
|
||||
|
||||
if ( st.hasMoreTokens() )
|
||||
{
|
||||
fkeyColumn = st.nextToken();
|
||||
int element = 4 + (keySequence - 1) * 2;
|
||||
if (tokens.size() > element) {
|
||||
fkeyColumn = (String)tokens.elementAt(element);
|
||||
}
|
||||
if ( st.hasMoreTokens() )
|
||||
{
|
||||
pkeyColumn = st.nextToken();
|
||||
|
||||
element++;
|
||||
if (tokens.size() > element) {
|
||||
pkeyColumn = (String)tokens.elementAt(element);
|
||||
}
|
||||
|
||||
tuple[3] = pkeyColumn.getBytes(); //PKCOLUMN_NAME
|
||||
@ -3568,8 +3565,33 @@ public abstract class AbstractJdbc1DatabaseMetaData
|
||||
if (unique) {
|
||||
sql += " AND i.indisunique ";
|
||||
}
|
||||
sql += " ORDER BY NON_UNIQUE, TYPE, INDEX_NAME ";
|
||||
sql += " ORDER BY NON_UNIQUE, TYPE, INDEX_NAME, ORDINAL_POSITION ";
|
||||
return connection.createStatement().executeQuery(sql);
|
||||
}
|
||||
|
||||
/**
|
||||
* Tokenize based on words not on single characters.
|
||||
*/
|
||||
private static Vector tokenize(String input, String delimiter) {
|
||||
Vector result = new Vector();
|
||||
int start = 0;
|
||||
int end = input.length();
|
||||
int delimiterSize = delimiter.length();
|
||||
|
||||
while (start < end) {
|
||||
int delimiterIndex = input.indexOf(delimiter,start);
|
||||
if (delimiterIndex < 0) {
|
||||
result.addElement(input.substring(start));
|
||||
break;
|
||||
} else {
|
||||
String token = input.substring(start,delimiterIndex);
|
||||
result.addElement(token);
|
||||
start = delimiterIndex + delimiterSize;
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
Reference in New Issue
Block a user