// © 2016 and later: Unicode, Inc. and others.
// License & terms of use: http://www.unicode.org/copyright.html
/*
 ************************************************************************
 * Copyright (c) 1997-2016, International Business Machines
 * Corporation and others. All Rights Reserved.
 ************************************************************************
 */

#include "unicode/utypes.h"

#if !UCONFIG_NO_NORMALIZATION

#include <string>
#include "unicode/bytestream.h"
#include "unicode/edits.h"
#include "unicode/uchar.h"
#include "unicode/normalizer2.h"
#include "unicode/normlzr.h"
#include "unicode/uniset.h"
#include "unicode/putil.h"
#include "cmemory.h"
#include "cstring.h"
#include "filestrm.h"
#include "normconf.h"
#include "uassert.h"
#include <stdio.h>

void NormalizerConformanceTest::runIndexedTest(int32_t index, UBool exec,
                                               const char* &name, char* /*par*/) {
    TESTCASE_AUTO_BEGIN;
    TESTCASE_AUTO(TestConformance);
    TESTCASE_AUTO(TestConformance32);
    TESTCASE_AUTO(TestCase6);
    TESTCASE_AUTO_END;
}

#define FIELD_COUNT 5

NormalizerConformanceTest::NormalizerConformanceTest() :
        normalizer(UnicodeString(), UNORM_NFC) {
    UErrorCode errorCode = U_ZERO_ERROR;
    nfc = Normalizer2::getNFCInstance(errorCode);
    nfd = Normalizer2::getNFDInstance(errorCode);
    nfkc = Normalizer2::getNFKCInstance(errorCode);
    nfkd = Normalizer2::getNFKDInstance(errorCode);
    assertSuccess("", errorCode, true, __FILE__, __LINE__);
}

NormalizerConformanceTest::~NormalizerConformanceTest() {}

// more interesting conformance test cases, not in the unicode.org NormalizationTest.txt
static const char *moreCases[]={
    // Markus 2001aug30
    "0061 0332 0308;00E4 0332;0061 0332 0308;00E4 0332;0061 0332 0308; # Markus 0",

    // Markus 2001oct26 - test edge case for iteration: U+0f73.cc==0 but decomposition.lead.cc==129
    "0061 0301 0F73;00E1 0F71 0F72;0061 0F71 0F72 0301;00E1 0F71 0F72;0061 0F71 0F72 0301; # Markus 1"
};

void NormalizerConformanceTest::compare(const UnicodeString& s1, const UnicodeString& s2) {
    UErrorCode status = U_ZERO_ERROR;
    // TODO: Re-enable this test after UTC fixes UAX 21.
    if(s1.indexOf((UChar32)0x0345) >= 0) { return; }
    if(Normalizer::compare(s1, s2, U_FOLD_CASE_DEFAULT, status) != 0) {
        errln("Normalizer::compare() failed for s1: " + prettify(s1) + " s2: " + prettify(s2));
    }
}

FileStream *
NormalizerConformanceTest::openNormalizationTestFile(const char *filename) {
    char unidataPath[2000];
    const char *folder;
    FileStream *input;
    UErrorCode errorCode;

    // look inside ICU_DATA first
    folder=pathToDataDirectory();
    if(folder!=NULL) {
        strcpy(unidataPath, folder);
        strcat(unidataPath, "unidata" U_FILE_SEP_STRING);
        strcat(unidataPath, filename);

        input=T_FileStream_open(unidataPath, "rb");
        if(input!=NULL) {
            return input;
        }
    }

    // find icu/source/data/unidata relative to the test data
    errorCode=U_ZERO_ERROR;
    folder=loadTestData(errorCode);
    if(U_SUCCESS(errorCode)) {
        strcpy(unidataPath, folder);
        strcat(unidataPath, U_FILE_SEP_STRING ".." U_FILE_SEP_STRING ".."
                            U_FILE_SEP_STRING ".." U_FILE_SEP_STRING ".."
                            U_FILE_SEP_STRING "data" U_FILE_SEP_STRING "unidata" U_FILE_SEP_STRING);
        strcat(unidataPath, filename);

        input=T_FileStream_open(unidataPath, "rb");
        if(input!=NULL) {
            return input;
        }
    }

    // look in icu/source/test/testdata/out/build
    errorCode=U_ZERO_ERROR;
    folder=loadTestData(errorCode);
    if(U_SUCCESS(errorCode)) {
        strcpy(unidataPath, folder);
        strcat(unidataPath, U_FILE_SEP_STRING);
        strcat(unidataPath, filename);

        input=T_FileStream_open(unidataPath, "rb");
        if(input!=NULL) {
            return input;
        }
    }

    // look in icu/source/test/testdata
    errorCode=U_ZERO_ERROR;
    folder=loadTestData(errorCode);
    if(U_SUCCESS(errorCode)) {
        strcpy(unidataPath, folder);
        strcat(unidataPath, U_FILE_SEP_STRING ".." U_FILE_SEP_STRING ".." U_FILE_SEP_STRING);
        strcat(unidataPath, filename);

        input=T_FileStream_open(unidataPath, "rb");
        if(input!=NULL) {
            return input;
        }
    }

    // find icu/source/data/unidata relative to U_TOPSRCDIR
#if defined(U_TOPSRCDIR)
    strcpy(unidataPath,
           U_TOPSRCDIR U_FILE_SEP_STRING "data" U_FILE_SEP_STRING "unidata" U_FILE_SEP_STRING);
    strcat(unidataPath, filename);

    input=T_FileStream_open(unidataPath, "rb");
    if(input!=NULL) {
        return input;
    }

    strcpy(unidataPath,
           U_TOPSRCDIR U_FILE_SEP_STRING "test" U_FILE_SEP_STRING "testdata" U_FILE_SEP_STRING);
    strcat(unidataPath, filename);

    input=T_FileStream_open(unidataPath, "rb");
    if(input!=NULL) {
        return input;
    }
#endif

    dataerrln("Failed to open %s", filename);
    return NULL;
}

/**
 * Test the conformance of Normalizer to
 * http://www.unicode.org/Public/UNIDATA/NormalizationTest.txt
 */
void NormalizerConformanceTest::TestConformance() {
    TestConformance(openNormalizationTestFile("NormalizationTest.txt"), 0);
}

void NormalizerConformanceTest::TestConformance32() {
    TestConformance(openNormalizationTestFile("NormalizationTest-3.2.0.txt"), UNORM_UNICODE_3_2);
}

void NormalizerConformanceTest::TestConformance(FileStream *input, int32_t options) {
    enum { BUF_SIZE = 1024 };
    char lineBuf[BUF_SIZE];
    UnicodeString fields[FIELD_COUNT];
    UErrorCode status = U_ZERO_ERROR;
    int32_t passCount = 0;
    int32_t failCount = 0;
    UChar32 c;

    if(input==NULL) {
        return;
    }

    // UnicodeSet for all code points that are not mentioned in NormalizationTest.txt
    UnicodeSet other(0, 0x10ffff);

    int32_t count, countMoreCases = UPRV_LENGTHOF(moreCases);
    for (count = 1;;++count) {
        if (!T_FileStream_eof(input)) {
            T_FileStream_readLine(input, lineBuf, (int32_t)sizeof(lineBuf));
        } else {
            // once NormalizationTest.txt is finished, use moreCases[]
            if(count > countMoreCases) {
                count = 0;
            } else if(count == countMoreCases) {
                // all done
                break;
            }
            uprv_strcpy(lineBuf, moreCases[count]);
        }
        if (lineBuf[0] == 0 || lineBuf[0] == '\n' || lineBuf[0] == '\r') continue;

        // Expect 5 columns of this format:
        // 1E0C;1E0C;0044 0323;1E0C;0044 0323; # <comments>

        // Parse out the comment.
        if (lineBuf[0] == '#') continue;

        // Read separator lines starting with '@'
        if (lineBuf[0] == '@') {
            logln(lineBuf);
            continue;
        }

        // Parse out the fields
        if (!hexsplit(lineBuf, ';', fields, FIELD_COUNT)) {
            errln((UnicodeString)"Unable to parse line " + count);
            break; // Syntax error
        }

        // Remove a single code point from the "other" UnicodeSet
        if(fields[0].length()==fields[0].moveIndex32(0, 1)) {
            c=fields[0].char32At(0);
            if(0xac20<=c && c<=0xd73f && quick) {
                // not an exhaustive test run: skip most Hangul syllables
                if(c==0xac20) {
                    other.remove(0xac20, 0xd73f);
                }
                continue;
            }
            other.remove(c);
        }

        if (checkConformance(fields, lineBuf, options, status)) {
            ++passCount;
        } else {
            ++failCount;
            if(status == U_FILE_ACCESS_ERROR) {
                dataerrln("Something is wrong with the normalizer, skipping the rest of the test.");
                break;
            }
        }
        if ((count % 1000) == 0) {
            logln("Line %d", count);
        }
    }

    T_FileStream_close(input);

    /*
     * Test that all characters that are not mentioned
     * as single code points in column 1
     * do not change under any normalization.
     */

    // remove U+ffff because that is the end-of-iteration sentinel value
    other.remove(0xffff);

    for(c=0; c<=0x10ffff; quick ? c+=113 : ++c) {
        if(0x30000<=c && c<0xe0000) {
            c=0xe0000;
        }
        if(!other.contains(c)) {
            continue;
        }

        fields[0]=fields[1]=fields[2]=fields[3]=fields[4].setTo(c);
        sprintf(lineBuf, "not mentioned code point U+%04lx", (long)c);

        if (checkConformance(fields, lineBuf, options, status)) {
            ++passCount;
        } else {
            ++failCount;
            if(status == U_FILE_ACCESS_ERROR) {
                dataerrln("Something is wrong with the normalizer, skipping the rest of the test: %s",
                          u_errorName(status));
                break;
            }
        }
        if ((c % 0x1000) == 0) {
            logln("Code point U+%04lx", (long)c);
        }
    }

    if (failCount != 0) {
        dataerrln((UnicodeString)"Total: " + failCount + " lines/code points failed, " +
                  passCount + " lines/code points passed");
    } else {
        logln((UnicodeString)"Total: " + passCount + " lines/code points passed");
    }
}

namespace {

UBool isNormalizedUTF8(const Normalizer2 &norm2, const UnicodeString &s, UErrorCode &errorCode) {
    std::string s8;
    return norm2.isNormalizedUTF8(s.toUTF8String(s8), errorCode);
}

}  // namespace

/**
 * Verify the conformance of the given line of the Unicode
 * normalization (UTR 15) test suite file. For each line,
 * there are five columns, corresponding to field[0]..field[4].
 *
 * The following invariants must be true for all conformant implementations:
 *  c2 == NFC(c1) == NFC(c2) == NFC(c3)
 *  c3 == NFD(c1) == NFD(c2) == NFD(c3)
 *  c4 == NFKC(c1) == NFKC(c2) == NFKC(c3) == NFKC(c4) == NFKC(c5)
 *  c5 == NFKD(c1) == NFKD(c2) == NFKD(c3) == NFKD(c4) == NFKD(c5)
 *
 * @param field the 5 columns
 * @param line the source line from the test suite file
 * @return true if the test passes
 */
UBool NormalizerConformanceTest::checkConformance(const UnicodeString* field,
                                                  const char *line,
                                                  int32_t options,
                                                  UErrorCode &status) {
    UBool pass = TRUE, result;
    UnicodeString out, fcd;
    int32_t fieldNum;

    for (int32_t i=0; i<FIELD_COUNT; ++i) {
        fieldNum = i+1;
        // Fields 0..2 must map to field 1 under NFC and to field 2 under NFD;
        // all five fields must map to field 3 under NFKC and to field 4 under NFKD.
        if (i<3) {
            pass &= checkNorm(UNORM_NFC, options, nfc, field[i], field[1], fieldNum);
            pass &= checkNorm(UNORM_NFD, options, nfd, field[i], field[2], fieldNum);
        }
        pass &= checkNorm(UNORM_NFKC, options, nfkc, field[i], field[3], fieldNum);
        pass &= checkNorm(UNORM_NFKD, options, nfkd, field[i], field[4], fieldNum);
    }

    // Canonically equivalent columns must also compare equal.
    compare(field[1], field[2]);
    compare(field[0], field[1]);

    if (!pass) {
        errln("FAIL: %s", line);
    }
    return pass;
}

/**
 * Checks one normalization mode of one input string against its expected form,
 * via the UTF-16 Normalizer API and via Normalizer2::normalizeUTF8().
 * (Sketch: the helper's name, signature, and exact set of checks here are
 * assumptions; the full conformance test exercises additional APIs.)
 */
UBool NormalizerConformanceTest::checkNorm(UNormalizationMode mode, int32_t options,
                                           const Normalizer2 *norm2,
                                           const UnicodeString &s, const UnicodeString &exp,
                                           int32_t fieldNum) {
    const char *modeString =
        mode == UNORM_NFC ? "NFC" : mode == UNORM_NFD ? "NFD" :
        mode == UNORM_NFKC ? "NFKC" : "NFKD";
    UErrorCode errorCode = U_ZERO_ERROR;
    char msg[32];
    sprintf(msg, "field %d", (int)fieldNum);

    // UTF-16 path.
    UnicodeString out16;
    Normalizer::normalize(s, mode, options, out16, errorCode);
    if (U_FAILURE(errorCode)) {
        errln("Normalizer::normalize(UNORM_NF%s) failed: %s", modeString, u_errorName(errorCode));
        return FALSE;
    }
    if (!assertEqual(modeString, "", s, out16, exp, msg)) {
        return FALSE;
    }

    if (options != 0) {
        // The unfiltered Normalizer2 instances do not implement UNORM_UNICODE_3_2.
        return TRUE;
    }

    // The expected form must itself already be normalized, also in UTF-8.
    if (!isNormalizedUTF8(*norm2, exp, errorCode)) {
        errln("Normalizer2.%s.isNormalizedUTF8(expected) is FALSE", modeString);
        return FALSE;
    }

    // UTF-8 path, tracking changes in an Edits object for the composing modes.
    std::string s8, exp8, out8;
    s.toUTF8String(s8);
    exp.toUTF8String(exp8);
    Edits edits;
    Edits *editsPtr = (mode == UNORM_NFC || mode == UNORM_NFKC) ? &edits : nullptr;
    StringByteSink<std::string> sink(&out8, static_cast<int32_t>(exp8.length()));
    norm2->normalizeUTF8(0, s8, sink, editsPtr, errorCode);
    if (U_FAILURE(errorCode)) {
        errln("Normalizer2.%s.normalizeUTF8(%s) failed: %s",
              modeString, s8.c_str(), u_errorName(errorCode));
        return FALSE;
    }
    if (out8 != exp8) {
        errln("Normalizer2.%s.normalizeUTF8(%s)=%s != %s",
              modeString, s8.c_str(), out8.c_str(), exp8.c_str());
        return FALSE;
    }
    if (editsPtr == nullptr) {
        return TRUE;
    }

    // Do the Edits cover the entire input & output?
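    // normalizeUTF8() records its changes in the Edits object. Iterating a coarse
    // Edits::Iterator to the end must therefore account for the entire strings:
    // the final sourceIndex()/destinationIndex() must equal the full UTF-8
    // input/output lengths, and hasChanges()/lengthDelta() must agree with a
    // direct comparison of the two strings.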
    UBool pass = TRUE;
    pass &= assertEquals("edits.hasChanges()", (UBool)(s8 != out8), edits.hasChanges());
    pass &= assertEquals("edits.lengthDelta()",
                         (int32_t)(out8.length() - s8.length()), edits.lengthDelta());
    Edits::Iterator iter = edits.getCoarseIterator();
    while (iter.next(errorCode)) {}
    pass &= assertEquals("edits source length",
                         static_cast<int32_t>(s8.length()), iter.sourceIndex());
    pass &= assertEquals("edits destination length",
                         static_cast<int32_t>(out8.length()), iter.destinationIndex());
    return pass;
}

/**
 * Do a normalization using the iterative API in the given direction.
 * @param dir either +1 or -1
 */
void NormalizerConformanceTest::iterativeNorm(const UnicodeString& str,
                                              UNormalizationMode mode, int32_t options,
                                              UnicodeString& result,
                                              int8_t dir) {
    UErrorCode status = U_ZERO_ERROR;
    normalizer.setText(str, status);
    normalizer.setMode(mode);
    normalizer.setOption(-1, 0);        // reset all options
    normalizer.setOption(options, 1);   // set desired options
    result.truncate(0);
    if (U_FAILURE(status)) {
        return;
    }
    UChar32 ch;
    if (dir > 0) {
        for (ch = normalizer.first(); ch != Normalizer::DONE; ch = normalizer.next()) {
            result.append(ch);
        }
    } else {
        for (ch = normalizer.last(); ch != Normalizer::DONE; ch = normalizer.previous()) {
            result.insert(0, ch);
        }
    }
}

UBool NormalizerConformanceTest::assertEqual(const char *op, const char *op2,
                                             const UnicodeString& s,
                                             const UnicodeString& got,
                                             const UnicodeString& exp,
                                             const char *msg) {
    if (exp == got) return TRUE;

    char *sChars, *gotChars, *expChars;
    UnicodeString sPretty(prettify(s));
    UnicodeString gotPretty(prettify(got));
    UnicodeString expPretty(prettify(exp));

    sChars = new char[sPretty.length() + 1];
    gotChars = new char[gotPretty.length() + 1];
    expChars = new char[expPretty.length() + 1];

    sPretty.extract(0, sPretty.length(), sChars, sPretty.length() + 1);
    sChars[sPretty.length()] = 0;
    gotPretty.extract(0, gotPretty.length(), gotChars, gotPretty.length() + 1);
    gotChars[gotPretty.length()] = 0;
    expPretty.extract(0, expPretty.length(), expChars, expPretty.length() + 1);
    expChars[expPretty.length()] = 0;

    errln(" %s: %s%s(%s)=%s, exp. %s", msg, op, op2, sChars, gotChars, expChars);

    delete []sChars;
    delete []gotChars;
    delete []expChars;
    return FALSE;
}

/**
 * Split a string into fields at the given delimiter character, then parse
 * each field from hex code points into a UnicodeString. That is,
 * "0040 0400;0C00;0899" -> { "\u0040\u0400", "\u0C00", "\u0899" }.
 * The output array is assumed to be of the proper length already, and exactly
 * outputLength fields are parsed. If there are too few, an error is reported
 * and FALSE is returned. If there are too many, the extras are ignored.
 *
 * @return FALSE upon failure
 */
UBool NormalizerConformanceTest::hexsplit(const char *s, char delimiter,
                                          UnicodeString output[], int32_t outputLength) {
    const char *t = s;
    char *end = NULL;
    UChar32 c;
    int32_t i;
    for (i=0; i<outputLength; ++i) {
        // read a sequence of hex code points for this field
        output[i].remove();
        for (;;) {
            c = (UChar32)uprv_strtoul(t, &end, 16);
            if( end <= t || c > 0x10ffff ||
                (*end != ' ' && *end != '\t' && *end != delimiter) ) {
                errln(UnicodeString("Bad field ", "") + (i + 1) + " in " + UnicodeString(s, ""));
                return FALSE;
            }

            output[i].append(c);
            t = (const char *)end;

            // skip whitespace
            while(*t == ' ' || *t == '\t') {
                ++t;
            }

            if(*t == delimiter) {
                ++t;
                break;
            }
            if(*t == 0) {
                if((i + 1) == outputLength) {
                    return TRUE;
                } else {
                    errln(UnicodeString("Missing field(s) in ", "") + s + " only " + (i + 1) +
                          " out of " + outputLength);
                    return FALSE;
                }
            }
        }
    }
    return TRUE;
}

// Specific tests for debugging. These are generally failures taken from
// the conformance file, but culled out to make debugging easier.
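
// Each line handed to _testOneLine() uses the NormalizationTest.txt format,
// i.e. five semicolon-terminated fields of space-separated hex code points:
//   source;NFC;NFD;NFKC;NFKD;
// For example, a hypothetical case "0061 0301;00E1;0061 0301;00E1;0061 0301;"
// states that <a, U+0301 combining acute> composes to U+00E1 under NFC/NFKC
// and stays decomposed under NFD/NFKD.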
void NormalizerConformanceTest::TestCase6(void) {
    _testOneLine("0385;0385;00A8 0301;0020 0308 0301;0020 0308 0301;");
}

void NormalizerConformanceTest::_testOneLine(const char *line) {
    UErrorCode status = U_ZERO_ERROR;
    UnicodeString fields[FIELD_COUNT];
    if (!hexsplit(line, ';', fields, FIELD_COUNT)) {
        errln((UnicodeString)"Unable to parse line " + line);
    } else {
        checkConformance(fields, line, 0, status);
    }
}

#endif /* #if !UCONFIG_NO_NORMALIZATION */