diff --git a/src/common/unicode/Makefile b/src/common/unicode/Makefile
index 30cd75cc6a..04d81dd5cb 100644
--- a/src/common/unicode/Makefile
+++ b/src/common/unicode/Makefile
@@ -21,7 +21,7 @@ CPPFLAGS += $(ICU_CFLAGS)
 # By default, do nothing.
 all:
 
-update-unicode: unicode_category_table.h unicode_norm_table.h unicode_nonspacing_table.h unicode_east_asian_fw_table.h unicode_normprops_table.h unicode_norm_hashfunc.h unicode_version.h
+update-unicode: unicode_category_table.h unicode_east_asian_fw_table.h unicode_nonspacing_table.h unicode_norm_hashfunc.h unicode_norm_table.h unicode_normprops_table.h unicode_version.h
 	mv $^ $(top_srcdir)/src/include/common/
 	$(MAKE) category-check
 	$(MAKE) normalization-check
@@ -29,7 +29,7 @@ update-unicode: unicode_category_table.h unicode_norm_table.h unicode_nonspacing
 # These files are part of the Unicode Character Database. Download
 # them on demand. The dependency on Makefile.global is for
 # UNICODE_VERSION.
-UnicodeData.txt EastAsianWidth.txt DerivedNormalizationProps.txt CompositionExclusions.txt NormalizationTest.txt: $(top_builddir)/src/Makefile.global
+CompositionExclusions.txt DerivedNormalizationProps.txt EastAsianWidth.txt NormalizationTest.txt UnicodeData.txt: $(top_builddir)/src/Makefile.global
 	$(DOWNLOAD) https://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/$(@F)
 
 unicode_version.h: generate-unicode_version.pl
@@ -82,4 +82,4 @@ clean:
 	rm -f $(OBJS) category_test category_test.o norm_test norm_test.o
 
 distclean: clean
-	rm -f UnicodeData.txt EastAsianWidth.txt CompositionExclusions.txt NormalizationTest.txt norm_test_table.h unicode_norm_table.h
+	rm -f CompositionExclusions.txt DerivedNormalizationProps.txt EastAsianWidth.txt NormalizationTest.txt UnicodeData.txt norm_test_table.h unicode_category_table.h unicode_norm_table.h
diff --git a/src/common/unicode/category_test.c b/src/common/unicode/category_test.c
index 6cd7cd1a5f..f1aaac0f61 100644
--- a/src/common/unicode/category_test.c
+++ b/src/common/unicode/category_test.c
@@ -28,9 +28,9 @@
 static int
 parse_unicode_version(const char *version)
 {
-	int			n,
-				major,
-				minor;
+	int			n PG_USED_FOR_ASSERTS_ONLY;
+	int			major;
+	int			minor;
 
 	n = sscanf(version, "%d.%d", &major, &minor);
 
@@ -54,8 +54,8 @@ main(int argc, char **argv)
 	int			pg_skipped_codepoints = 0;
 	int			icu_skipped_codepoints = 0;
 
-	printf("Postgres Unicode Version:\t%s\n", PG_UNICODE_VERSION);
-	printf("ICU Unicode Version:\t\t%s\n", U_UNICODE_VERSION);
+	printf("category_test: Postgres Unicode version:\t%s\n", PG_UNICODE_VERSION);
+	printf("category_test: ICU Unicode version:\t\t%s\n", U_UNICODE_VERSION);
 
 	for (UChar32 code = 0; code <= 0x10ffff; code++)
 	{
@@ -79,11 +79,11 @@ main(int argc, char **argv)
 				icu_skipped_codepoints++;
 			else
 			{
-				printf("FAILURE for codepoint %06x\n", code);
-				printf("Postgres category: %02d %s %s\n", pg_category,
+				printf("category_test: FAILURE for codepoint 0x%06x\n", code);
+				printf("category_test: Postgres category: %02d %s %s\n", pg_category,
 					   unicode_category_abbrev(pg_category),
 					   unicode_category_string(pg_category));
-				printf("ICU category: %02d %s %s\n", icu_category,
+				printf("category_test: ICU category: %02d %s %s\n", icu_category,
 					   unicode_category_abbrev(icu_category),
 					   unicode_category_string(icu_category));
 				printf("\n");
@@ -93,16 +93,16 @@ main(int argc, char **argv)
 	}
 
 	if (pg_skipped_codepoints > 0)
-		printf("Skipped %d codepoints unassigned in Postgres due to Unicode version mismatch.\n",
+		printf("category_test: skipped %d codepoints unassigned in Postgres due to Unicode version mismatch\n",
 			   pg_skipped_codepoints);
 	if (icu_skipped_codepoints > 0)
-		printf("Skipped %d codepoints unassigned in ICU due to Unicode version mismatch.\n",
+		printf("category_test: skipped %d codepoints unassigned in ICU due to Unicode version mismatch\n",
 			   icu_skipped_codepoints);
 
-	printf("category_test: All tests successful!\n");
+	printf("category_test: success\n");
 	exit(0);
 #else
-	printf("ICU support required for test; skipping.\n");
+	printf("category_test: ICU support required for test; skipping\n");
 	exit(0);
 #endif
 }
diff --git a/src/common/unicode/meson.build b/src/common/unicode/meson.build
index 02e07cf4f4..df4f3a4ed1 100644
--- a/src/common/unicode/meson.build
+++ b/src/common/unicode/meson.build
@@ -11,7 +11,7 @@ endif
 
 # These files are part of the Unicode Character Database. Download them on
 # demand.
-foreach f : ['UnicodeData.txt', 'EastAsianWidth.txt', 'DerivedNormalizationProps.txt', 'CompositionExclusions.txt', 'NormalizationTest.txt']
+foreach f : ['CompositionExclusions.txt', 'DerivedNormalizationProps.txt', 'EastAsianWidth.txt', 'NormalizationTest.txt', 'UnicodeData.txt']
   url = unicode_baseurl.format(UNICODE_VERSION, f)
   target = custom_target(f,
     output: f,
@@ -24,15 +24,6 @@ endforeach
 
 update_unicode_targets = []
 
-update_unicode_targets += \
-  custom_target('unicode_version.h',
-    output: ['unicode_version.h'],
-    command: [
-      perl, files('generate-unicode_version.pl'),
-      '--outdir', '@OUTDIR@', '--version', UNICODE_VERSION],
-    build_by_default: false,
-  )
-
 update_unicode_targets += \
   custom_target('unicode_category_table.h',
     input: [unicode_data['UnicodeData.txt']],
@@ -43,6 +34,25 @@ update_unicode_targets += \
     build_by_default: false,
   )
 
+update_unicode_targets += \
+  custom_target('unicode_east_asian_fw_table.h',
+    input: [unicode_data['EastAsianWidth.txt']],
+    output: ['unicode_east_asian_fw_table.h'],
+    command: [perl, files('generate-unicode_east_asian_fw_table.pl'), '@INPUT@'],
+    build_by_default: false,
+    capture: true,
+  )
+
+update_unicode_targets += \
+  custom_target('unicode_nonspacing_table.h',
+    input: [unicode_data['UnicodeData.txt']],
+    output: ['unicode_nonspacing_table.h'],
+    depend_files: perfect_hash_pm,
+    command: [perl, files('generate-unicode_nonspacing_table.pl'), '@INPUT@'],
+    build_by_default: false,
+    capture: true,
+  )
+
 update_unicode_targets += \
   custom_target('unicode_norm_table.h',
     input: [unicode_data['UnicodeData.txt'], unicode_data['CompositionExclusions.txt']],
@@ -54,25 +64,6 @@ update_unicode_targets += \
     build_by_default: false,
   )
 
-update_unicode_targets += \
-  custom_target('unicode_nonspacing_table.h',
-    input: [unicode_data['UnicodeData.txt']],
-    output: ['unicode_nonspacing_table.h'],
-    depend_files: perfect_hash_pm,
-    command: [perl, files('generate-unicode_nonspacing_table.pl'), '@INPUT@'],
-    build_by_default: false,
-    capture: true,
-  )
-
-update_unicode_targets += \
-  custom_target('unicode_east_asian_fw_table.h',
-    input: [unicode_data['EastAsianWidth.txt']],
-    output: ['unicode_east_asian_fw_table.h'],
-    command: [perl, files('generate-unicode_east_asian_fw_table.pl'), '@INPUT@'],
-    build_by_default: false,
-    capture: true,
-  )
-
 update_unicode_targets += \
   custom_target('unicode_normprops_table.h',
     input: [unicode_data['DerivedNormalizationProps.txt']],
@@ -83,6 +74,15 @@ update_unicode_targets += \
     capture: true,
   )
 
+update_unicode_targets += \
+  custom_target('unicode_version.h',
+    output: ['unicode_version.h'],
+    command: [
+      perl, files('generate-unicode_version.pl'),
+      '--outdir', '@OUTDIR@', '--version', UNICODE_VERSION],
+    build_by_default: false,
+  )
+
 norm_test_table = custom_target('norm_test_table.h',
   input: [unicode_data['NormalizationTest.txt']],
   output: ['norm_test_table.h'],