[CHG] Deleted some unused files in FreeType source folder

Crayon2000 2010-02-16 05:37:35 +00:00
parent 07a6685a0e
commit 5965330dc8
19 changed files with 0 additions and 9302 deletions

View file

@@ -1,20 +0,0 @@
/* This is a generated file. */
FT_USE_MODULE( FT_Driver_ClassRec, tt_driver_class )
FT_USE_MODULE( FT_Driver_ClassRec, t1_driver_class )
FT_USE_MODULE( FT_Driver_ClassRec, cff_driver_class )
FT_USE_MODULE( FT_Driver_ClassRec, t1cid_driver_class )
FT_USE_MODULE( FT_Driver_ClassRec, pfr_driver_class )
FT_USE_MODULE( FT_Driver_ClassRec, t42_driver_class )
FT_USE_MODULE( FT_Driver_ClassRec, winfnt_driver_class )
FT_USE_MODULE( FT_Driver_ClassRec, pcf_driver_class )
FT_USE_MODULE( FT_Driver_ClassRec, bdf_driver_class )
FT_USE_MODULE( FT_Module_Class, sfnt_module_class )
FT_USE_MODULE( FT_Module_Class, autofit_module_class )
FT_USE_MODULE( FT_Module_Class, pshinter_module_class )
FT_USE_MODULE( FT_Renderer_Class, ft_raster1_renderer_class )
FT_USE_MODULE( FT_Renderer_Class, ft_smooth_renderer_class )
FT_USE_MODULE( FT_Renderer_Class, ft_smooth_lcd_renderer_class )
FT_USE_MODULE( FT_Renderer_Class, ft_smooth_lcdv_renderer_class )
FT_USE_MODULE( FT_Module_Class, psaux_module_class )
FT_USE_MODULE( FT_Module_Class, psnames_module_class )
/* EOF */

View file

@@ -1,443 +0,0 @@
/*
* This little program is used to parse the FreeType headers and
* find the declaration of all public APIs. This is easy, because
* they all look like the following:
*
* FT_EXPORT( return_type )
* function_name( function arguments );
*
* You must pass the list of header files as arguments. Wildcards are
* accepted if you are using GCC for compilation (and probably by
* other compilers too).
*
* Author: David Turner, 2005, 2006, 2008, 2009
*
* This code is explicitly placed into the public domain.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#define PROGRAM_NAME "apinames"
#define PROGRAM_VERSION "0.1"
#define LINEBUFF_SIZE 1024
typedef enum OutputFormat_
{
OUTPUT_LIST = 0, /* output the list of names, one per line */
OUTPUT_WINDOWS_DEF, /* output a Windows .DEF file for Visual C++ or Mingw */
OUTPUT_BORLAND_DEF, /* output a Windows .DEF file for Borland C++ */
OUTPUT_WATCOM_LBC /* output a Watcom Linker Command File */
} OutputFormat;
static void
panic( const char* message )
{
fprintf( stderr, "PANIC: %s\n", message );
exit(2);
}
typedef struct NameRec_
{
char* name;
unsigned int hash;
} NameRec, *Name;
static Name the_names;
static int num_names;
static int max_names;
static void
names_add( const char* name,
const char* end )
{
int nn, len, h;
Name nm;
if ( end <= name )
return;
/* compute hash value */
len = (int)(end - name);
h = 0;
for ( nn = 0; nn < len; nn++ )
h = h*33 + name[nn];
/* check for a pre-existing name */
for ( nn = 0; nn < num_names; nn++ )
{
nm = the_names + nn;
if ( (int)nm->hash == h &&
memcmp( name, nm->name, len ) == 0 &&
nm->name[len] == 0 )
return;
}
/* add new name */
if ( num_names >= max_names )
{
max_names += (max_names >> 1) + 4;
the_names = (NameRec*)realloc( the_names, sizeof(the_names[0])*max_names );
if ( the_names == NULL )
panic( "not enough memory" );
}
nm = &the_names[num_names++];
nm->hash = h;
nm->name = (char*)malloc( len+1 );
if ( nm->name == NULL )
panic( "not enough memory" );
memcpy( nm->name, name, len );
nm->name[len] = 0;
}
static int
name_compare( const void* name1,
const void* name2 )
{
Name n1 = (Name)name1;
Name n2 = (Name)name2;
return strcmp( n1->name, n2->name );
}
static void
names_sort( void )
{
qsort( the_names, (size_t)num_names, sizeof(the_names[0]), name_compare );
}
static void
names_dump( FILE* out,
OutputFormat format,
const char* dll_name )
{
int nn;
switch ( format )
{
case OUTPUT_WINDOWS_DEF:
if ( dll_name )
fprintf( out, "LIBRARY %s\n", dll_name );
fprintf( out, "DESCRIPTION FreeType 2 DLL\n" );
fprintf( out, "EXPORTS\n" );
for ( nn = 0; nn < num_names; nn++ )
fprintf( out, " %s\n", the_names[nn].name );
break;
case OUTPUT_BORLAND_DEF:
if ( dll_name )
fprintf( out, "LIBRARY %s\n", dll_name );
fprintf( out, "DESCRIPTION FreeType 2 DLL\n" );
fprintf( out, "EXPORTS\n" );
for ( nn = 0; nn < num_names; nn++ )
fprintf( out, " _%s\n", the_names[nn].name );
break;
case OUTPUT_WATCOM_LBC:
{
/* we must omit the .dll suffix from the library name */
char temp[512];
char* dot;
if ( dll_name == NULL )
{
fprintf( stderr,
"you must provide a DLL name with the -d option !!\n" );
exit(4);
}
dot = strchr( dll_name, '.' );
if ( dot != NULL )
{
int len = (dot - dll_name);
if ( len > (int)(sizeof(temp)-1) )
len = sizeof(temp)-1;
memcpy( temp, dll_name, len );
temp[len] = 0;
dll_name = (const char*)temp;
}
for ( nn = 0; nn < num_names; nn++ )
fprintf( out, "++_%s.%s.%s\n", the_names[nn].name, dll_name,
the_names[nn].name );
}
break;
default: /* LIST */
for ( nn = 0; nn < num_names; nn++ )
fprintf( out, "%s\n", the_names[nn].name );
}
}
/* states of the line parser */
typedef enum State_
{
STATE_START = 0, /* waiting for FT_EXPORT keyword and return type */
STATE_TYPE /* type was read, waiting for function name */
} State;
static int
read_header_file( FILE* file, int verbose )
{
static char buff[ LINEBUFF_SIZE+1 ];
State state = STATE_START;
while ( !feof( file ) )
{
char* p;
if ( !fgets( buff, LINEBUFF_SIZE, file ) )
break;
p = buff;
while ( *p && (*p == ' ' || *p == '\\') ) /* skip leading whitespace */
p++;
if ( *p == '\n' || *p == '\r' ) /* skip empty lines */
continue;
switch ( state )
{
case STATE_START:
{
if ( memcmp( p, "FT_EXPORT(", 10 ) != 0 )
break;
p += 10;
for (;;)
{
if ( *p == 0 || *p == '\n' || *p == '\r' )
goto NextLine;
if ( *p == ')' )
{
p++;
break;
}
p++;
}
state = STATE_TYPE;
/* sometimes, the name is just after the FT_EXPORT(...), so
* skip whitespace, and fall-through if we find an alphanumeric
* character
*/
while ( *p == ' ' || *p == '\t' )
p++;
if ( !isalpha(*p) )
break;
}
/* fall-through */
case STATE_TYPE:
{
char* name = p;
while ( isalnum(*p) || *p == '_' )
p++;
if ( p > name )
{
if ( verbose )
fprintf( stderr, ">>> %.*s\n", (int)(p - name), name );
names_add( name, p );
}
state = STATE_START;
}
break;
default:
;
}
NextLine:
;
}
return 0;
}
static void
usage( void )
{
static const char* const format =
"%s %s: extract FreeType API names from header files\n\n"
"this program is used to extract the list of public FreeType API\n"
"functions. It receives the list of header files as argument and\n"
"generates a sorted list of unique identifiers\n\n"
"usage: %s header1 [options] [header2 ...]\n\n"
"options: - : parse the content of stdin, ignore arguments\n"
" -v : verbose mode, output sent to standard error\n"
" -oFILE : write output to FILE instead of standard output\n"
" -dNAME : indicate DLL file name, 'freetype.dll' by default\n"
" -w : output .DEF file for Visual C++ and Mingw\n"
" -wB : output .DEF file for Borland C++\n"
" -wW : output Watcom Linker Response File\n"
"\n";
fprintf( stderr,
format,
PROGRAM_NAME,
PROGRAM_VERSION,
PROGRAM_NAME
);
exit(1);
}
int main( int argc, const char* const* argv )
{
int from_stdin = 0;
int verbose = 0;
OutputFormat format = OUTPUT_LIST; /* the default */
FILE* out = stdout;
const char* library_name = NULL;
if ( argc < 2 )
usage();
/* '-' used as a single argument means read source file from stdin */
while ( argc > 1 && argv[1][0] == '-' )
{
const char* arg = argv[1];
switch ( arg[1] )
{
case 'v':
verbose = 1;
break;
case 'o':
if ( arg[2] == 0 )
{
if ( argc < 2 )
usage();
arg = argv[2];
argv++;
argc--;
}
else
arg += 2;
out = fopen( arg, "wt" );
if ( out == NULL )
{
fprintf( stderr, "could not open '%s' for writing\n", argv[2] );
exit(3);
}
break;
case 'd':
if ( arg[2] == 0 )
{
if ( argc < 2 )
usage();
arg = argv[2];
argv++;
argc--;
}
else
arg += 2;
library_name = arg;
break;
case 'w':
format = OUTPUT_WINDOWS_DEF;
switch ( arg[2] )
{
case 'B':
format = OUTPUT_BORLAND_DEF;
break;
case 'W':
format = OUTPUT_WATCOM_LBC;
break;
case 0:
break;
default:
usage();
}
break;
case 0:
from_stdin = 1;
break;
default:
usage();
}
argc--;
argv++;
}
if ( from_stdin )
{
read_header_file( stdin, verbose );
}
else
{
for ( --argc, argv++; argc > 0; argc--, argv++ )
{
FILE* file = fopen( argv[0], "rb" );
if ( file == NULL )
fprintf( stderr, "unable to open '%s'\n", argv[0] );
else
{
if ( verbose )
fprintf( stderr, "opening '%s'\n", argv[0] );
read_header_file( file, verbose );
fclose( file );
}
}
}
if ( num_names == 0 )
panic( "could not find exported functions !!\n" );
names_sort();
names_dump( out, format, library_name );
if ( out != stdout )
fclose( out );
return 0;
}
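
The deleted tool above is a plain line parser: find FT_EXPORT( type ), then take the next identifier as the function name. The same extraction can be sketched in a few lines of Python, for illustration only; the regular expression and the one-line lookahead are assumptions about header layout, not FreeType code.

import re
import sys

# FT_EXPORT( return_type ) may carry the function name on the same
# line or on the next one, so keep a one-line "pending" flag.
EXPORT_RE = re.compile( r'FT_EXPORT\(\s*[^)]*\)\s*(\w+)?' )

def scan_header( path ):
    names = set()
    pending = False
    for line in open( path ):
        if pending:
            m = re.match( r'\s*(\w+)', line )
            if m:
                names.add( m.group( 1 ) )
            pending = False
        m = EXPORT_RE.search( line )
        if m:
            if m.group( 1 ):
                names.add( m.group( 1 ) )
            else:
                pending = True
    return names

if __name__ == '__main__':
    exported = set()
    for header in sys.argv[1:]:
        exported |= scan_header( header )
    for name in sorted( exported ):
        print( name )

This reproduces only the default OUTPUT_LIST behavior; the .DEF and Watcom output modes are simple formatting variations on the same sorted name list.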

View file

@@ -1,114 +0,0 @@
#!/usr/bin/env python
#
# Check trace components in FreeType 2 source.
# Author: suzuki toshiya, 2009
#
# This code is explicitly placed into the public domain.
import sys
import os
import re
SRC_FILE_LIST = []
USED_COMPONENT = {}
KNOWN_COMPONENT = {}
SRC_FILE_DIRS = [ "src" ]
TRACE_DEF_FILES = [ "include/freetype/internal/fttrace.h" ]
# --------------------------------------------------------------
# Parse command line options
#
for i in range( 1, len( sys.argv ) ):
if sys.argv[i].startswith( "--help" ):
print "Usage: %s [option]" % sys.argv[0]
print "Search used-but-defined and defined-but-not-used trace_XXX macros"
print ""
print " --help:"
print " Show this help"
print ""
print " --src-dirs=dir1:dir2:..."
print " Specify the directories of C source files to be checked"
print " Default is %s" % ":".join( SRC_FILE_DIRS )
print ""
print " --def-files=file1:file2:..."
print " Specify the header files including FT_TRACE_DEF()"
print " Default is %s" % ":".join( TRACE_DEF_FILES )
print ""
exit(0)
if sys.argv[i].startswith( "--src-dirs=" ):
SRC_FILE_DIRS = sys.argv[i].replace( "--src-dirs=", "", 1 ).split( ":" )
elif sys.argv[i].startswith( "--def-files=" ):
TRACE_DEF_FILES = sys.argv[i].replace( "--def-files=", "", 1 ).split( ":" )
# --------------------------------------------------------------
# Scan C source and header files using trace macros.
#
c_pathname_pat = re.compile( '^.*\.[ch]$', re.IGNORECASE )
trace_use_pat = re.compile( '^[ \t]*#define[ \t]+FT_COMPONENT[ \t]+trace_' )
for d in SRC_FILE_DIRS:
for ( p, dlst, flst ) in os.walk( d ):
for f in flst:
if c_pathname_pat.match( f ) != None:
src_pathname = os.path.join( p, f )
line_num = 0
for src_line in open( src_pathname, 'r' ):
line_num = line_num + 1
src_line = src_line.strip()
if trace_use_pat.match( src_line ) != None:
component_name = trace_use_pat.sub( '', src_line )
if component_name in USED_COMPONENT:
USED_COMPONENT[component_name].append( "%s:%d" % ( src_pathname, line_num ) )
else:
USED_COMPONENT[component_name] = [ "%s:%d" % ( src_pathname, line_num ) ]
# --------------------------------------------------------------
# Scan header file(s) defining trace macros.
#
trace_def_pat_opn = re.compile( '^.*FT_TRACE_DEF[ \t]*\([ \t]*' )
trace_def_pat_cls = re.compile( '[ \t\)].*$' )
for f in TRACE_DEF_FILES:
line_num = 0
for hdr_line in open( f, 'r' ):
line_num = line_num + 1
hdr_line = hdr_line.strip()
if trace_def_pat_opn.match( hdr_line ) != None:
component_name = trace_def_pat_opn.sub( '', hdr_line )
component_name = trace_def_pat_cls.sub( '', component_name )
if component_name in KNOWN_COMPONENT:
print "trace component %s is defined twice, see %s and fttrace.h:%d" % \
( component_name, KNOWN_COMPONENT[component_name], line_num )
else:
KNOWN_COMPONENT[component_name] = "%s:%d" % \
( os.path.basename( f ), line_num )
# --------------------------------------------------------------
# Compare the used and defined trace macros.
#
print "# Trace component used in the implementations but not defined in fttrace.h."
cmpnt = USED_COMPONENT.keys()
cmpnt.sort()
for c in cmpnt:
if c not in KNOWN_COMPONENT:
print "Trace component %s (used in %s) is not defined." % ( c, ", ".join( USED_COMPONENT[c] ) )
print "# Trace component is defined but not used in the implementations."
cmpnt = KNOWN_COMPONENT.keys()
cmpnt.sort()
for c in cmpnt:
if c not in USED_COMPONENT:
if c != "any":
print "Trace component %s (defined in %s) is not used." % ( c, KNOWN_COMPONENT[c] )

View file

@@ -1,79 +0,0 @@
# compute arctangent table for CORDIC computations in fttrigon.c
import sys, math
#units = 64*65536.0 # don't change !!
units = 256
scale = units/math.pi
shrink = 1.0
comma = ""
def calc_val( x ):
global units, shrink
angle = math.atan(x)
shrink = shrink * math.cos(angle)
return angle/math.pi * units
def print_val( n, x ):
global comma
lo = int(x)
hi = lo + 1
alo = math.atan(lo)
ahi = math.atan(hi)
ax = math.atan(2.0**n)
errlo = abs( alo - ax )
errhi = abs( ahi - ax )
if ( errlo < errhi ):
hi = lo
sys.stdout.write( comma + repr( int(hi) ) )
comma = ", "
print ""
print "table of arctan( 1/2^n ) for PI = " + repr(units/65536.0) + " units"
# compute range of "i"
r = [-1]
r = r + range(32)
for n in r:
if n >= 0:
x = 1.0/(2.0**n) # tangent value
else:
x = 2.0**(-n)
angle = math.atan(x) # arctangent
angle2 = angle*scale # arctangent in FT_Angle units
# determine which integer value for angle gives the best tangent
lo = int(angle2)
hi = lo + 1
tlo = math.tan(lo/scale)
thi = math.tan(hi/scale)
errlo = abs( tlo - x )
errhi = abs( thi - x )
angle2 = hi
if errlo < errhi:
angle2 = lo
if angle2 <= 0:
break
sys.stdout.write( comma + repr( int(angle2) ) )
comma = ", "
shrink = shrink * math.cos( angle2/scale)
print
print "shrink factor = " + repr( shrink )
print "shrink factor 2 = " + repr( shrink * (2.0**32) )
print "expansion factor = " + repr(1/shrink)
print ""

View file

@@ -1 +0,0 @@
*.pyc

View file

@@ -1,584 +0,0 @@
# Content (c) 2002, 2004, 2006, 2007, 2008, 2009
# David Turner <david@freetype.org>
#
# This file contains routines used to parse the content of documentation
# comment blocks and build more structured objects out of them.
#
from sources import *
from utils import *
import string, re
# this regular expression is used to detect code sequences. these
# are simply code fragments embedded in '{' and '}' like in:
#
# {
# x = y + z;
# if ( zookoo == 2 )
# {
# foobar();
# }
# }
#
# note that indentation of the starting and ending braces must be
# exactly the same. the code sequence can contain braces at greater
# indentation
#
re_code_start = re.compile( r"(\s*){\s*$" )
re_code_end = re.compile( r"(\s*)}\s*$" )
# this regular expression is used to isolate identifiers from
# other text
#
re_identifier = re.compile( r'(\w*)' )
# we collect macros ending in `_H'; while outputting the object data, we use
# this info together with the object's file location to emit the appropriate
# header file macro and name before the object itself
#
re_header_macro = re.compile( r'^#define\s{1,}(\w{1,}_H)\s{1,}<(.*)>' )
#############################################################################
#
# The DocCode class is used to store source code lines.
#
# 'self.lines' contains a set of source code lines that will be dumped as
# HTML in a <PRE> tag.
#
# The object is filled line by line by the parser; it strips the leading
# "margin" space from each input line before storing it in 'self.lines'.
#
class DocCode:
def __init__( self, margin, lines ):
self.lines = []
self.words = None
# remove margin spaces
for l in lines:
if string.strip( l[:margin] ) == "":
l = l[margin:]
self.lines.append( l )
def dump( self, prefix = "", width = 60 ):
lines = self.dump_lines( 0, width )
for l in lines:
print prefix + l
def dump_lines( self, margin = 0, width = 60 ):
result = []
for l in self.lines:
result.append( " " * margin + l )
return result
#############################################################################
#
# The DocPara class is used to store "normal" text paragraph.
#
# 'self.words' contains the list of words that make up the paragraph
#
class DocPara:
def __init__( self, lines ):
self.lines = None
self.words = []
for l in lines:
l = string.strip( l )
self.words.extend( string.split( l ) )
def dump( self, prefix = "", width = 60 ):
lines = self.dump_lines( 0, width )
for l in lines:
print prefix + l
def dump_lines( self, margin = 0, width = 60 ):
cur = "" # current line
col = 0 # current width
result = []
for word in self.words:
ln = len( word )
if col > 0:
ln = ln + 1
if col + ln > width:
result.append( " " * margin + cur )
cur = word
col = len( word )
else:
if col > 0:
cur = cur + " "
cur = cur + word
col = col + ln
if col > 0:
result.append( " " * margin + cur )
return result
#############################################################################
#
# The DocField class is used to store a list containing either DocPara or
# DocCode objects. Each DocField also has an optional "name" which is used
# when the object corresponds to a field or value definition
#
class DocField:
def __init__( self, name, lines ):
self.name = name # can be None for normal paragraphs/sources
self.items = [] # list of items
mode_none = 0 # start parsing mode
mode_code = 1 # parsing code sequences
mode_para = 3 # parsing normal paragraph
margin = -1 # current code sequence indentation
cur_lines = []
# now analyze the markup lines to see if they contain paragraphs,
# code sequences or fields definitions
#
start = 0
mode = mode_none
for l in lines:
# are we parsing a code sequence ?
if mode == mode_code:
m = re_code_end.match( l )
if m and len( m.group( 1 ) ) <= margin:
# that's it, we finished the code sequence
code = DocCode( 0, cur_lines )
self.items.append( code )
margin = -1
cur_lines = []
mode = mode_none
else:
# nope, continue the code sequence
cur_lines.append( l[margin:] )
else:
# start of code sequence ?
m = re_code_start.match( l )
if m:
# save current lines
if cur_lines:
para = DocPara( cur_lines )
self.items.append( para )
cur_lines = []
# switch to code extraction mode
margin = len( m.group( 1 ) )
mode = mode_code
else:
if not string.split( l ) and cur_lines:
# if the line is empty, we end the current paragraph,
# if any
para = DocPara( cur_lines )
self.items.append( para )
cur_lines = []
else:
# otherwise, simply add the line to the current
# paragraph
cur_lines.append( l )
if mode == mode_code:
# unexpected end of code sequence
code = DocCode( margin, cur_lines )
self.items.append( code )
elif cur_lines:
para = DocPara( cur_lines )
self.items.append( para )
def dump( self, prefix = "" ):
if self.name:
print prefix + self.name + " ::"
prefix = prefix + "----"
first = 1
for p in self.items:
if not first:
print ""
p.dump( prefix )
first = 0
def dump_lines( self, margin = 0, width = 60 ):
result = []
nl = None
for p in self.items:
if nl:
result.append( "" )
result.extend( p.dump_lines( margin, width ) )
nl = 1
return result
# this regular expression is used to detect field definitions
#
re_field = re.compile( r"\s*(\w*|\w(\w|\.)*\w)\s*::" )
class DocMarkup:
def __init__( self, tag, lines ):
self.tag = string.lower( tag )
self.fields = []
cur_lines = []
field = None
mode = 0
for l in lines:
m = re_field.match( l )
if m:
# we detected the start of a new field definition
# first, save the current one
if cur_lines:
f = DocField( field, cur_lines )
self.fields.append( f )
cur_lines = []
field = None
field = m.group( 1 ) # record field name
ln = len( m.group( 0 ) )
l = " " * ln + l[ln:]
cur_lines = [l]
else:
cur_lines.append( l )
if field or cur_lines:
f = DocField( field, cur_lines )
self.fields.append( f )
def get_name( self ):
try:
return self.fields[0].items[0].words[0]
except:
return None
def get_start( self ):
try:
result = ""
for word in self.fields[0].items[0].words:
result = result + " " + word
return result[1:]
except:
return "ERROR"
def dump( self, margin ):
print " " * margin + "<" + self.tag + ">"
for f in self.fields:
f.dump( " " )
print " " * margin + "</" + self.tag + ">"
class DocChapter:
def __init__( self, block ):
self.block = block
self.sections = []
if block:
self.name = block.name
self.title = block.get_markup_words( "title" )
self.order = block.get_markup_words( "sections" )
else:
self.name = "Other"
self.title = string.split( "Miscellaneous" )
self.order = []
class DocSection:
def __init__( self, name = "Other" ):
self.name = name
self.blocks = {}
self.block_names = [] # ordered block names in section
self.defs = []
self.abstract = ""
self.description = ""
self.order = []
self.title = "ERROR"
self.chapter = None
def add_def( self, block ):
self.defs.append( block )
def add_block( self, block ):
self.block_names.append( block.name )
self.blocks[block.name] = block
def process( self ):
# look up one block that contains a valid section description
for block in self.defs:
title = block.get_markup_text( "title" )
if title:
self.title = title
self.abstract = block.get_markup_words( "abstract" )
self.description = block.get_markup_items( "description" )
self.order = block.get_markup_words( "order" )
return
def reorder( self ):
self.block_names = sort_order_list( self.block_names, self.order )
class ContentProcessor:
def __init__( self ):
"""initialize a block content processor"""
self.reset()
self.sections = {} # dictionary of documentation sections
self.section = None # current documentation section
self.chapters = [] # list of chapters
self.headers = {} # dictionary of header macros
def set_section( self, section_name ):
"""set current section during parsing"""
if not self.sections.has_key( section_name ):
section = DocSection( section_name )
self.sections[section_name] = section
self.section = section
else:
self.section = self.sections[section_name]
def add_chapter( self, block ):
chapter = DocChapter( block )
self.chapters.append( chapter )
def reset( self ):
"""reset the content processor for a new block"""
self.markups = []
self.markup = None
self.markup_lines = []
def add_markup( self ):
"""add a new markup section"""
if self.markup and self.markup_lines:
# get rid of last line of markup if it's empty
marks = self.markup_lines
if len( marks ) > 0 and not string.strip( marks[-1] ):
self.markup_lines = marks[:-1]
m = DocMarkup( self.markup, self.markup_lines )
self.markups.append( m )
self.markup = None
self.markup_lines = []
def process_content( self, content ):
"""process a block content and return a list of DocMarkup objects
corresponding to it"""
markup = None
markup_lines = []
first = 1
for line in content:
found = None
for t in re_markup_tags:
m = t.match( line )
if m:
found = string.lower( m.group( 1 ) )
prefix = len( m.group( 0 ) )
line = " " * prefix + line[prefix:] # remove markup from line
break
# is it the start of a new markup section ?
if found:
first = 0
self.add_markup() # add current markup content
self.markup = found
if len( string.strip( line ) ) > 0:
self.markup_lines.append( line )
elif first == 0:
self.markup_lines.append( line )
self.add_markup()
return self.markups
def parse_sources( self, source_processor ):
blocks = source_processor.blocks
count = len( blocks )
for n in range( count ):
source = blocks[n]
if source.content:
# this is a documentation comment, we need to catch
# all following normal blocks in the "follow" list
#
follow = []
m = n + 1
while m < count and not blocks[m].content:
follow.append( blocks[m] )
m = m + 1
doc_block = DocBlock( source, follow, self )
def finish( self ):
# process all sections to extract their abstract, description
# and ordered list of items
#
for sec in self.sections.values():
sec.process()
# process chapters to check that all sections are correctly
# listed there
for chap in self.chapters:
for sec in chap.order:
if self.sections.has_key( sec ):
section = self.sections[sec]
section.chapter = chap
section.reorder()
chap.sections.append( section )
else:
sys.stderr.write( "WARNING: chapter '" + \
chap.name + "' in " + chap.block.location() + \
" lists unknown section '" + sec + "'\n" )
# check that all sections are in a chapter
#
others = []
for sec in self.sections.values():
if not sec.chapter:
others.append( sec )
# create a new special chapter for all remaining sections
# when necessary
#
if others:
chap = DocChapter( None )
chap.sections = others
self.chapters.append( chap )
class DocBlock:
def __init__( self, source, follow, processor ):
processor.reset()
self.source = source
self.code = []
self.type = "ERRTYPE"
self.name = "ERRNAME"
self.section = processor.section
self.markups = processor.process_content( source.content )
# compute block type from first markup tag
try:
self.type = self.markups[0].tag
except:
pass
# compute block name from first markup paragraph
try:
markup = self.markups[0]
para = markup.fields[0].items[0]
name = para.words[0]
m = re_identifier.match( name )
if m:
name = m.group( 1 )
self.name = name
except:
pass
if self.type == "section":
# detect new section starts
processor.set_section( self.name )
processor.section.add_def( self )
elif self.type == "chapter":
# detect new chapter
processor.add_chapter( self )
else:
processor.section.add_block( self )
# now, compute the source lines relevant to this documentation
# block. We keep normal comments in for obvious reasons (??)
source = []
for b in follow:
if b.format:
break
for l in b.lines:
# collect header macro definitions
m = re_header_macro.match( l )
if m:
processor.headers[m.group( 2 )] = m.group( 1 );
# we use "/* */" as a separator
if re_source_sep.match( l ):
break
source.append( l )
# now strip the leading and trailing empty lines from the sources
start = 0
end = len( source ) - 1
while start < end and not string.strip( source[start] ):
start = start + 1
while start < end and not string.strip( source[end] ):
end = end - 1
if start == end and not string.strip( source[start] ):
self.code = []
else:
self.code = source[start:end + 1]
def location( self ):
return self.source.location()
def get_markup( self, tag_name ):
"""return the DocMarkup corresponding to a given tag in a block"""
for m in self.markups:
if m.tag == string.lower( tag_name ):
return m
return None
def get_markup_name( self, tag_name ):
"""return the name of a given primary markup in a block"""
try:
m = self.get_markup( tag_name )
return m.get_name()
except:
return None
def get_markup_words( self, tag_name ):
try:
m = self.get_markup( tag_name )
return m.fields[0].items[0].words
except:
return []
def get_markup_text( self, tag_name ):
result = self.get_markup_words( tag_name )
return string.join( result )
def get_markup_items( self, tag_name ):
try:
m = self.get_markup( tag_name )
return m.fields[0].items
except:
return None
# eof
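
The brace rule described at the top of the file (the opening and closing brace of a code sequence must sit at exactly the same indentation) is enforced by comparing the captured margins. A tiny demonstration of the two patterns, with a made-up block:

import re

re_code_start = re.compile( r"(\s*){\s*$" )
re_code_end   = re.compile( r"(\s*)}\s*$" )

block = [
    "  {",
    "    x = y + z;",
    "  }",
]

margin  = len( re_code_start.match( block[0] ).group( 1 ) )
closing = re_code_end.match( block[-1] )
print( margin )                                   # 2
print( len( closing.group( 1 ) ) <= margin )      # True: block is closed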

View file

@@ -1,113 +0,0 @@
#!/usr/bin/env python
#
# DocBeauty (c) 2003, 2004, 2008 David Turner <david@freetype.org>
#
# This program is used to beautify the documentation comments used
# in the FreeType 2 public headers.
#
from sources import *
from content import *
from utils import *
import utils
import sys, os, time, string, getopt
content_processor = ContentProcessor()
def beautify_block( block ):
if block.content:
content_processor.reset()
markups = content_processor.process_content( block.content )
text = []
first = 1
for markup in markups:
text.extend( markup.beautify( first ) )
first = 0
# now beautify the documentation "borders" themselves
lines = [" /*************************************************************************"]
for l in text:
lines.append( " *" + l )
lines.append( " */" )
block.lines = lines
def usage():
print "\nDocBeauty 0.1 Usage information\n"
print " docbeauty [options] file1 [file2 ...]\n"
print "using the following options:\n"
print " -h : print this page"
print " -b : backup original files with the 'orig' extension"
print ""
print " --backup : same as -b"
def main( argv ):
"""main program loop"""
global output_dir
try:
opts, args = getopt.getopt( sys.argv[1:], \
"hb", \
["help", "backup"] )
except getopt.GetoptError:
usage()
sys.exit( 2 )
if args == []:
usage()
sys.exit( 1 )
# process options
#
output_dir = None
do_backup = None
for opt in opts:
if opt[0] in ( "-h", "--help" ):
usage()
sys.exit( 0 )
if opt[0] in ( "-b", "--backup" ):
do_backup = 1
# create context and processor
source_processor = SourceProcessor()
# retrieve the list of files to process
file_list = make_file_list( args )
for filename in file_list:
source_processor.parse_file( filename )
for block in source_processor.blocks:
beautify_block( block )
new_name = filename + ".new"
ok = None
try:
file = open( new_name, "wt" )
for block in source_processor.blocks:
for line in block.lines:
file.write( line )
file.write( "\n" )
file.close()
except:
ok = 0
# if called from the command line
#
if __name__ == '__main__':
main( sys.argv )
# eof

View file

@@ -1,106 +0,0 @@
#!/usr/bin/env python
#
# DocMaker (c) 2002, 2004, 2008 David Turner <david@freetype.org>
#
# This program is a re-write of the original DocMaker tool used
# to generate the API Reference of the FreeType font engine
# by converting in-source comments into structured HTML.
#
# This new version is capable of outputting XML data, as well
# as accepting more liberal formatting options.
#
# It also uses regular expression matching and substitution
# to speed things significantly.
#
from sources import *
from content import *
from utils import *
from formatter import *
from tohtml import *
import utils
import sys, os, time, string, glob, getopt
def usage():
print "\nDocMaker Usage information\n"
print " docmaker [options] file1 [file2 ...]\n"
print "using the following options:\n"
print " -h : print this page"
print " -t : set project title, as in '-t \"My Project\"'"
print " -o : set output directory, as in '-o mydir'"
print " -p : set documentation prefix, as in '-p ft2'"
print ""
print " --title : same as -t, as in '--title=\"My Project\"'"
print " --output : same as -o, as in '--output=mydir'"
print " --prefix : same as -p, as in '--prefix=ft2'"
def main( argv ):
"""main program loop"""
global output_dir
try:
opts, args = getopt.getopt( sys.argv[1:], \
"ht:o:p:", \
["help", "title=", "output=", "prefix="] )
except getopt.GetoptError:
usage()
sys.exit( 2 )
if args == []:
usage()
sys.exit( 1 )
# process options
#
project_title = "Project"
project_prefix = None
output_dir = None
for opt in opts:
if opt[0] in ( "-h", "--help" ):
usage()
sys.exit( 0 )
if opt[0] in ( "-t", "--title" ):
project_title = opt[1]
if opt[0] in ( "-o", "--output" ):
utils.output_dir = opt[1]
if opt[0] in ( "-p", "--prefix" ):
project_prefix = opt[1]
check_output()
# create context and processor
source_processor = SourceProcessor()
content_processor = ContentProcessor()
# retrieve the list of files to process
file_list = make_file_list( args )
for filename in file_list:
source_processor.parse_file( filename )
content_processor.parse_sources( source_processor )
# process sections
content_processor.finish()
formatter = HtmlFormatter( content_processor, project_title, project_prefix )
formatter.toc_dump()
formatter.index_dump()
formatter.section_dump_all()
# if called from the command line
#
if __name__ == '__main__':
main( sys.argv )
# eof
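
For reference, main() above boils down to the following pipeline. This is a hedged sketch that assumes the deleted docmaker modules are importable as-is; the input file name is illustrative only:

from sources import SourceProcessor
from content import ContentProcessor
from tohtml  import HtmlFormatter

source_processor  = SourceProcessor()
content_processor = ContentProcessor()

for filename in [ "ftimage.h" ]:          # illustrative input file
    source_processor.parse_file( filename )
    content_processor.parse_sources( source_processor )

content_processor.finish()

formatter = HtmlFormatter( content_processor, "FreeType-2", "ft2" )
formatter.toc_dump()
formatter.index_dump()
formatter.section_dump_all()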

View file

@@ -1,188 +0,0 @@
# Formatter (c) 2002, 2004, 2007, 2008 David Turner <david@freetype.org>
#
from sources import *
from content import *
from utils import *
# This is the base Formatter class. Its purpose is to convert
# a content processor's data into specific documents (i.e., table of
# contents, global index, and individual API reference indices).
#
# You need to sub-class it to output anything sensible. For example,
# the file tohtml.py contains the definition of the HtmlFormatter sub-class
# used to output -- you guessed it -- HTML.
#
class Formatter:
def __init__( self, processor ):
self.processor = processor
self.identifiers = {}
self.chapters = processor.chapters
self.sections = processor.sections.values()
self.block_index = []
# store all blocks in a dictionary
self.blocks = []
for section in self.sections:
for block in section.blocks.values():
self.add_identifier( block.name, block )
# add enumeration values to the index, since this is useful
for markup in block.markups:
if markup.tag == 'values':
for field in markup.fields:
self.add_identifier( field.name, block )
self.block_index = self.identifiers.keys()
self.block_index.sort( index_sort )
def add_identifier( self, name, block ):
if self.identifiers.has_key( name ):
# duplicate name!
sys.stderr.write( \
"WARNING: duplicate definition for '" + name + "' in " + \
block.location() + ", previous definition in " + \
self.identifiers[name].location() + "\n" )
else:
self.identifiers[name] = block
#
# Formatting the table of contents
#
def toc_enter( self ):
pass
def toc_chapter_enter( self, chapter ):
pass
def toc_section_enter( self, section ):
pass
def toc_section_exit( self, section ):
pass
def toc_chapter_exit( self, chapter ):
pass
def toc_index( self, index_filename ):
pass
def toc_exit( self ):
pass
def toc_dump( self, toc_filename = None, index_filename = None ):
output = None
if toc_filename:
output = open_output( toc_filename )
self.toc_enter()
for chap in self.processor.chapters:
self.toc_chapter_enter( chap )
for section in chap.sections:
self.toc_section_enter( section )
self.toc_section_exit( section )
self.toc_chapter_exit( chap )
self.toc_index( index_filename )
self.toc_exit()
if output:
close_output( output )
#
# Formatting the index
#
def index_enter( self ):
pass
def index_name_enter( self, name ):
pass
def index_name_exit( self, name ):
pass
def index_exit( self ):
pass
def index_dump( self, index_filename = None ):
output = None
if index_filename:
output = open_output( index_filename )
self.index_enter()
for name in self.block_index:
self.index_name_enter( name )
self.index_name_exit( name )
self.index_exit()
if output:
close_output( output )
#
# Formatting a section
#
def section_enter( self, section ):
pass
def block_enter( self, block ):
pass
def markup_enter( self, markup, block = None ):
pass
def field_enter( self, field, markup = None, block = None ):
pass
def field_exit( self, field, markup = None, block = None ):
pass
def markup_exit( self, markup, block = None ):
pass
def block_exit( self, block ):
pass
def section_exit( self, section ):
pass
def section_dump( self, section, section_filename = None ):
output = None
if section_filename:
output = open_output( section_filename )
self.section_enter( section )
for name in section.block_names:
block = self.identifiers[name]
self.block_enter( block )
for markup in block.markups[1:]: # always ignore first markup!
self.markup_enter( markup, block )
for field in markup.fields:
self.field_enter( field, markup, block )
self.field_exit( field, markup, block )
self.markup_exit( markup, block )
self.block_exit( block )
self.section_exit( section )
if output:
close_output( output )
def section_dump_all( self ):
for section in self.sections:
self.section_dump( section )
# eof
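
A minimal sketch of the sub-classing the comments above call for, assuming the deleted formatter.py is importable; the TextFormatter name is hypothetical, and only the hooks of interest are overridden:

from formatter import Formatter

class TextFormatter( Formatter ):
    """hypothetical formatter that dumps a plain-text table of contents"""
    def toc_chapter_enter( self, chapter ):
        print( " ".join( chapter.title ) )   # chapter.title is a word list
    def toc_section_enter( self, section ):
        print( "  - " + section.title )      # section.title is a string

Calling TextFormatter( processor ).toc_dump() would then drive chapters and sections through the enter/exit hooks defined by the base class.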

View file

@@ -1,347 +0,0 @@
# Sources (c) 2002, 2003, 2004, 2006, 2007, 2008, 2009
# David Turner <david@freetype.org>
#
#
# this file contains definitions of classes needed to decompose
# C sources files into a series of multi-line "blocks". There are
# two kinds of blocks:
#
# - normal blocks, which contain source code or ordinary comments
#
# - documentation blocks, which have restricted formatting, and
# whose text always start with a documentation markup tag like
# "<Function>", "<Type>", etc..
#
# the routines used to process the content of documentation blocks
# are not contained here, but in "content.py"
#
# the classes and methods found here only deal with text parsing
# and basic documentation block extraction
#
import fileinput, re, sys, os, string
################################################################
##
## BLOCK FORMAT PATTERN
##
## A simple class containing compiled regular expressions used
## to detect potential documentation format block comments within
## C source code
##
## note that the 'column' pattern must contain a group that will
## be used to "unbox" the content of documentation comment blocks
##
class SourceBlockFormat:
def __init__( self, id, start, column, end ):
"""create a block pattern, used to recognize special documentation blocks"""
self.id = id
self.start = re.compile( start, re.VERBOSE )
self.column = re.compile( column, re.VERBOSE )
self.end = re.compile( end, re.VERBOSE )
#
# format 1 documentation comment blocks look like the following:
#
# /************************************/
# /* */
# /* */
# /* */
# /************************************/
#
# we define a few regular expressions here to detect them
#
start = r'''
\s* # any number of whitespace
/\*{2,}/ # followed by '/' and at least two asterisks then '/'
\s*$ # probably followed by whitespace
'''
column = r'''
\s* # any number of whitespace
/\*{1} # followed by '/' and precisely one asterisk
([^*].*) # followed by anything (group 1)
\*{1}/ # followed by one asterisk and a '/'
\s*$ # probably followed by whitespace
'''
re_source_block_format1 = SourceBlockFormat( 1, start, column, start )
#
# format 2 documentation comment blocks look like the following:
#
# /************************************ (at least 2 asterisks)
# *
# *
# *
# *
# **/ (1 or more asterisks at the end)
#
# we define a few regular expressions here to detect them
#
start = r'''
\s* # any number of whitespace
/\*{2,} # followed by '/' and at least two asterisks
\s*$ # probably followed by whitespace
'''
column = r'''
\s* # any number of whitespace
\*{1}(?!/) # followed by precisely one asterisk not followed by `/'
(.*) # then anything (group1)
'''
end = r'''
\s* # any number of whitespace
\*+/ # followed by at least one asterisk, then '/'
'''
re_source_block_format2 = SourceBlockFormat( 2, start, column, end )
#
# the list of supported documentation block formats, we could add new ones
# relatively easily
#
re_source_block_formats = [re_source_block_format1, re_source_block_format2]
#
# the following regular expressions correspond to markup tags
# within the documentation comment blocks. they're equivalent
# despite their different syntax
#
# notice how each markup tag _must_ begin a new line
#
re_markup_tag1 = re.compile( r'''\s*<(\w*)>''' ) # <xxxx> format
re_markup_tag2 = re.compile( r'''\s*@(\w*):''' ) # @xxxx: format
#
# the list of supported markup tags, we could add new ones relatively
# easily
#
re_markup_tags = [re_markup_tag1, re_markup_tag2]
#
# used to detect a cross-reference, after markup tags have been stripped
#
re_crossref = re.compile( r'@(\w*)(.*)' )
#
# used to detect italic and bold styles in paragraph text
#
re_italic = re.compile( r"_(\w(\w|')*)_(.*)" ) # _italic_
re_bold = re.compile( r"\*(\w(\w|')*)\*(.*)" ) # *bold*
#
# used to detect the end of commented source lines
#
re_source_sep = re.compile( r'\s*/\*\s*\*/' )
#
# used to perform cross-reference within source output
#
re_source_crossref = re.compile( r'(\W*)(\w*)' )
#
# a list of reserved source keywords
#
re_source_keywords = re.compile( '''\\b ( typedef |
struct |
enum |
union |
const |
char |
int |
short |
long |
void |
signed |
unsigned |
\#include |
\#define |
\#undef |
\#if |
\#ifdef |
\#ifndef |
\#else |
\#endif ) \\b''', re.VERBOSE )
################################################################
##
## SOURCE BLOCK CLASS
##
## A SourceProcessor is in charge of reading a C source file
## and decomposing it into a series of different "SourceBlocks".
## each one of these blocks can be made of the following data:
##
## - A documentation comment block that starts with "/**" and
## whose exact format will be discussed later
##
## - normal source lines, including comments
##
## the important fields in a text block are the following ones:
##
## self.lines : a list of text lines for the corresponding block
##
## self.content : for documentation comment blocks only, this is the
## block content that has been "unboxed" from its
## decoration. This is None for all other blocks
## (i.e. sources or ordinary comments with no starting
## markup tag)
##
class SourceBlock:
def __init__( self, processor, filename, lineno, lines ):
self.processor = processor
self.filename = filename
self.lineno = lineno
self.lines = lines[:]
self.format = processor.format
self.content = []
if self.format == None:
return
words = []
# extract comment lines
lines = []
for line0 in self.lines:
m = self.format.column.match( line0 )
if m:
lines.append( m.group( 1 ) )
# now, look for a markup tag
for l in lines:
l = string.strip( l )
if len( l ) > 0:
for tag in re_markup_tags:
if tag.match( l ):
self.content = lines
return
def location( self ):
return "(" + self.filename + ":" + repr( self.lineno ) + ")"
# debugging only - not used in normal operations
def dump( self ):
if self.content:
print "{{{content start---"
for l in self.content:
print l
print "---content end}}}"
return
fmt = ""
if self.format:
fmt = repr( self.format.id ) + " "
for line in self.lines:
print line
################################################################
##
## SOURCE PROCESSOR CLASS
##
## The SourceProcessor is in charge of reading a C source file
## and decomposing it into a series of different "SourceBlock"
## objects.
##
## each one of these blocks can be made of the following data:
##
## - A documentation comment block that starts with "/**" and
## whose exact format will be discussed later
##
## - normal source lines, including comments
##
##
class SourceProcessor:
def __init__( self ):
"""initialize a source processor"""
self.blocks = []
self.filename = None
self.format = None
self.lines = []
def reset( self ):
"""reset a block processor, clean all its blocks"""
self.blocks = []
self.format = None
def parse_file( self, filename ):
"""parse a C source file, and add its blocks to the processor's list"""
self.reset()
self.filename = filename
fileinput.close()
self.format = None
self.lineno = 0
self.lines = []
for line in fileinput.input( filename ):
# strip trailing newlines, important on Windows machines!
if line[-1] == '\012':
line = line[0:-1]
if self.format == None:
self.process_normal_line( line )
else:
if self.format.end.match( line ):
# that's a normal block end, add it to 'lines' and
# create a new block
self.lines.append( line )
self.add_block_lines()
elif self.format.column.match( line ):
# that's a normal column line, add it to 'lines'
self.lines.append( line )
else:
# humm.. this is an unexpected block end,
# create a new block, but don't process the line
self.add_block_lines()
# we need to process the line again
self.process_normal_line( line )
# record the last lines
self.add_block_lines()
def process_normal_line( self, line ):
"""process a normal line and check whether it is the start of a new block"""
for f in re_source_block_formats:
if f.start.match( line ):
self.add_block_lines()
self.format = f
self.lineno = fileinput.filelineno()
self.lines.append( line )
def add_block_lines( self ):
"""add the current accumulated lines and create a new block"""
if self.lines != []:
block = SourceBlock( self, self.filename, self.lineno, self.lines )
self.blocks.append( block )
self.format = None
self.lines = []
# debugging only, not used in normal operations
def dump( self ):
"""print all blocks in a processor"""
for b in self.blocks:
b.dump()
# eof
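
A short demonstration of the format 2 patterns "unboxing" a comment block, using the same expressions defined above; the sample block is made up:

import re

start  = re.compile( r'\s* /\*{2,} \s*$', re.VERBOSE )
column = re.compile( r'\s* \*{1}(?!/) (.*)', re.VERBOSE )
end    = re.compile( r'\s* \*+/', re.VERBOSE )

block = [
    "  /**************************",
    "   * <Function>",
    "   *    FT_Init_FreeType",
    "   **/",
]

print( bool( start.match( block[0] ) ) )      # True: block start detected
for line in block[1:3]:
    print( column.match( line ).group( 1 ) )  # unboxed content, margin kept
print( bool( end.match( block[3] ) ) )        # True: block end detected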

View file

@@ -1,593 +0,0 @@
# ToHTML (c) 2002, 2003, 2005, 2006, 2007, 2008
# David Turner <david@freetype.org>
from sources import *
from content import *
from formatter import *
import time
# The following defines the HTML header used by all generated pages.
html_header_1 = """\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>\
"""
html_header_2 = """\
API Reference</title>
<style type="text/css">
body { font-family: Verdana, Geneva, Arial, Helvetica, serif;
color: #000000;
background: #FFFFFF; }
p { text-align: justify; }
h1 { text-align: center; }
li { text-align: justify; }
td { padding: 0 0.5em 0 0.5em; }
td.left { padding: 0 0.5em 0 0.5em;
text-align: left; }
a:link { color: #0000EF; }
a:visited { color: #51188E; }
a:hover { color: #FF0000; }
span.keyword { font-family: monospace;
text-align: left;
white-space: pre;
color: darkblue; }
pre.colored { color: blue; }
ul.empty { list-style-type: none; }
</style>
</head>
<body>
"""
html_header_3 = """
<table align=center><tr><td><font size=-1>[<a href="\
"""
html_header_3i = """
<table align=center><tr><td width="100%"></td>
<td><font size=-1>[<a href="\
"""
html_header_4 = """\
">Index</a>]</font></td>
<td width="100%"></td>
<td><font size=-1>[<a href="\
"""
html_header_5 = """\
">TOC</a>]</font></td></tr></table>
<center><h1>\
"""
html_header_5t = """\
">Index</a>]</font></td>
<td width="100%"></td></tr></table>
<center><h1>\
"""
html_header_6 = """\
API Reference</h1></center>
"""
# The HTML footer used by all generated pages.
html_footer = """\
</body>
</html>\
"""
# The header and footer used for each section.
section_title_header = "<center><h1>"
section_title_footer = "</h1></center>"
# The header and footer used for code segments.
code_header = '<pre class="colored">'
code_footer = '</pre>'
# Paragraph header and footer.
para_header = "<p>"
para_footer = "</p>"
# Block header and footer.
block_header = '<table align=center width="75%"><tr><td>'
block_footer_start = """\
</td></tr></table>
<hr width="75%">
<table align=center width="75%"><tr><td><font size=-2>[<a href="\
"""
block_footer_middle = """\
">Index</a>]</font></td>
<td width="100%"></td>
<td><font size=-2>[<a href="\
"""
block_footer_end = """\
">TOC</a>]</font></td></tr></table>
"""
# Description header/footer.
description_header = '<table align=center width="87%"><tr><td>'
description_footer = "</td></tr></table><br>"
# Marker header/inter/footer combination.
marker_header = '<table align=center width="87%" cellpadding=5><tr bgcolor="#EEEEFF"><td><em><b>'
marker_inter = "</b></em></td></tr><tr><td>"
marker_footer = "</td></tr></table>"
# Header location header/footer.
header_location_header = '<table align=center width="87%"><tr><td>'
header_location_footer = "</td></tr></table><br>"
# Source code extracts header/footer.
source_header = '<table align=center width="87%"><tr bgcolor="#D6E8FF"><td><pre>\n'
source_footer = "\n</pre></table><br>"
# Chapter header/inter/footer.
chapter_header = '<br><table align=center width="75%"><tr><td><h2>'
chapter_inter = '</h2><ul class="empty"><li>'
chapter_footer = '</li></ul></td></tr></table>'
# Index footer.
index_footer_start = """\
<hr>
<table><tr><td width="100%"></td>
<td><font size=-2>[<a href="\
"""
index_footer_end = """\
">TOC</a>]</font></td></tr></table>
"""
# TOC footer.
toc_footer_start = """\
<hr>
<table><tr><td><font size=-2>[<a href="\
"""
toc_footer_end = """\
">Index</a>]</font></td>
<td width="100%"></td>
</tr></table>
"""
# source language keyword coloration/styling
keyword_prefix = '<span class="keyword">'
keyword_suffix = '</span>'
section_synopsis_header = '<h2>Synopsis</h2>'
section_synopsis_footer = ''
# Translate a single line of source to HTML. This will convert
# a "<" into "&lt;", ">" into "&gt;", etc.
def html_quote( line ):
result = string.replace( line, "&", "&amp;" )
result = string.replace( result, "<", "&lt;" )
result = string.replace( result, ">", "&gt;" )
return result
# same as 'html_quote', but ignores left and right brackets
def html_quote0( line ):
return string.replace( line, "&", "&amp;" )
def dump_html_code( lines, prefix = "" ):
# clean the last empty lines
l = len( lines )
while l > 0 and string.strip( lines[l - 1] ) == "":
l = l - 1
# The code footer should be directly appended to the last code
# line to avoid an additional blank line.
print prefix + code_header,
for line in lines[0 : l]:
print '\n' + prefix + html_quote( line ),
print prefix + code_footer,
class HtmlFormatter( Formatter ):
def __init__( self, processor, project_title, file_prefix ):
Formatter.__init__( self, processor )
global html_header_1, html_header_2, html_header_3
global html_header_4, html_header_5, html_footer
if file_prefix:
file_prefix = file_prefix + "-"
else:
file_prefix = ""
self.headers = processor.headers
self.project_title = project_title
self.file_prefix = file_prefix
self.html_header = html_header_1 + project_title + \
html_header_2 + \
html_header_3 + file_prefix + "index.html" + \
html_header_4 + file_prefix + "toc.html" + \
html_header_5 + project_title + \
html_header_6
self.html_index_header = html_header_1 + project_title + \
html_header_2 + \
html_header_3i + file_prefix + "toc.html" + \
html_header_5 + project_title + \
html_header_6
self.html_toc_header = html_header_1 + project_title + \
html_header_2 + \
html_header_3 + file_prefix + "index.html" + \
html_header_5t + project_title + \
html_header_6
self.html_footer = "<center><font size=""-2"">generated on " + \
time.asctime( time.localtime( time.time() ) ) + \
"</font></center>" + html_footer
self.columns = 3
def make_section_url( self, section ):
return self.file_prefix + section.name + ".html"
def make_block_url( self, block ):
return self.make_section_url( block.section ) + "#" + block.name
def make_html_words( self, words ):
""" convert a series of simple words into some HTML text """
line = ""
if words:
line = html_quote( words[0] )
for w in words[1:]:
line = line + " " + html_quote( w )
return line
def make_html_word( self, word ):
"""analyze a simple word to detect cross-references and styling"""
# look for cross-references
m = re_crossref.match( word )
if m:
try:
name = m.group( 1 )
rest = m.group( 2 )
block = self.identifiers[name]
url = self.make_block_url( block )
return '<a href="' + url + '">' + name + '</a>' + rest
except:
# we detected a cross-reference to an unknown item
sys.stderr.write( \
"WARNING: undefined cross reference '" + name + "'.\n" )
return '?' + name + '?' + rest
# look for italics and bolds
m = re_italic.match( word )
if m:
name = m.group( 1 )
rest = m.group( 3 )
return '<i>' + name + '</i>' + rest
m = re_bold.match( word )
if m:
name = m.group( 1 )
rest = m.group( 3 )
return '<b>' + name + '</b>' + rest
return html_quote( word )
def make_html_para( self, words ):
""" convert words of a paragraph into tagged HTML text, handle xrefs """
line = ""
if words:
line = self.make_html_word( words[0] )
for word in words[1:]:
line = line + " " + self.make_html_word( word )
# convert `...' quotations into real left and right single quotes
line = re.sub( r"(^|\W)`(.*?)'(\W|$)", \
r'\1&lsquo;\2&rsquo;\3', \
line )
# convert tilde into non-breakable space
line = string.replace( line, "~", "&nbsp;" )
return para_header + line + para_footer
def make_html_code( self, lines ):
""" convert a code sequence to HTML """
line = code_header + '\n'
for l in lines:
line = line + html_quote( l ) + '\n'
return line + code_footer
def make_html_items( self, items ):
""" convert a field's content into some valid HTML """
lines = []
for item in items:
if item.lines:
lines.append( self.make_html_code( item.lines ) )
else:
lines.append( self.make_html_para( item.words ) )
return string.join( lines, '\n' )
def print_html_items( self, items ):
print self.make_html_items( items )
def print_html_field( self, field ):
if field.name:
print "<table><tr valign=top><td><b>" + field.name + "</b></td><td>"
print self.make_html_items( field.items )
if field.name:
print "</td></tr></table>"
def html_source_quote( self, line, block_name = None ):
result = ""
while line:
m = re_source_crossref.match( line )
if m:
name = m.group( 2 )
prefix = html_quote( m.group( 1 ) )
length = len( m.group( 0 ) )
if name == block_name:
# this is the current block name, if any
result = result + prefix + '<b>' + name + '</b>'
elif re_source_keywords.match( name ):
# this is a C keyword
result = result + prefix + keyword_prefix + name + keyword_suffix
elif self.identifiers.has_key( name ):
# this is a known identifier
block = self.identifiers[name]
result = result + prefix + '<a href="' + \
self.make_block_url( block ) + '">' + name + '</a>'
else:
result = result + html_quote( line[:length] )
line = line[length:]
else:
result = result + html_quote( line )
line = []
return result
def print_html_field_list( self, fields ):
print "<p></p>"
print "<table cellpadding=3 border=0>"
for field in fields:
if len( field.name ) > 22:
print "<tr valign=top><td colspan=0><b>" + field.name + "</b></td></tr>"
print "<tr valign=top><td></td><td>"
else:
print "<tr valign=top><td><b>" + field.name + "</b></td><td>"
self.print_html_items( field.items )
print "</td></tr>"
print "</table>"
def print_html_markup( self, markup ):
table_fields = []
for field in markup.fields:
if field.name:
# we begin a new series of field or value definitions, we
# will record them in the 'table_fields' list before outputting
# all of them as a single table
#
table_fields.append( field )
else:
if table_fields:
self.print_html_field_list( table_fields )
table_fields = []
self.print_html_items( field.items )
if table_fields:
self.print_html_field_list( table_fields )
#
# Formatting the index
#
def index_enter( self ):
print self.html_index_header
self.index_items = {}
def index_name_enter( self, name ):
block = self.identifiers[name]
url = self.make_block_url( block )
self.index_items[name] = url
def index_exit( self ):
# block_index already contains the sorted list of index names
count = len( self.block_index )
rows = ( count + self.columns - 1 ) / self.columns
print "<table align=center border=0 cellpadding=0 cellspacing=0>"
for r in range( rows ):
line = "<tr>"
for c in range( self.columns ):
i = r + c * rows
if i < count:
bname = self.block_index[r + c * rows]
url = self.index_items[bname]
line = line + '<td><a href="' + url + '">' + bname + '</a></td>'
else:
line = line + '<td></td>'
line = line + "</tr>"
print line
print "</table>"
print index_footer_start + \
self.file_prefix + "toc.html" + \
index_footer_end
print self.html_footer
self.index_items = {}
def index_dump( self, index_filename = None ):
if index_filename == None:
index_filename = self.file_prefix + "index.html"
Formatter.index_dump( self, index_filename )
#
# Formatting the table of content
#
def toc_enter( self ):
print self.html_toc_header
print "<center><h1>Table of Contents</h1></center>"
def toc_chapter_enter( self, chapter ):
print chapter_header + string.join( chapter.title ) + chapter_inter
print "<table cellpadding=5>"
def toc_section_enter( self, section ):
print '<tr valign=top><td class="left">'
print '<a href="' + self.make_section_url( section ) + '">' + \
section.title + '</a></td><td>'
print self.make_html_para( section.abstract )
def toc_section_exit( self, section ):
print "</td></tr>"
def toc_chapter_exit( self, chapter ):
print "</table>"
print chapter_footer
def toc_index( self, index_filename ):
print chapter_header + \
'<a href="' + index_filename + '">Global Index</a>' + \
chapter_inter + chapter_footer
def toc_exit( self ):
print toc_footer_start + \
self.file_prefix + "index.html" + \
toc_footer_end
print self.html_footer
def toc_dump( self, toc_filename = None, index_filename = None ):
if toc_filename == None:
toc_filename = self.file_prefix + "toc.html"
if index_filename == None:
index_filename = self.file_prefix + "index.html"
Formatter.toc_dump( self, toc_filename, index_filename )
#
# Formatting sections
#
def section_enter( self, section ):
print self.html_header
print section_title_header
print section.title
print section_title_footer
maxwidth = 0
for b in section.blocks.values():
if len( b.name ) > maxwidth:
maxwidth = len( b.name )
width = 70 # XXX magic number
if maxwidth != 0:
# print section synopsis
print section_synopsis_header
print "<table align=center cellspacing=5 cellpadding=0 border=0>"
columns = width / maxwidth
if columns < 1:
columns = 1
count = len( section.block_names )
rows = ( count + columns - 1 ) / columns
for r in range( rows ):
line = "<tr>"
for c in range( columns ):
i = r + c * rows
line = line + '<td></td><td>'
if i < count:
name = section.block_names[i]
line = line + '<a href="#' + name + '">' + name + '</a>'
line = line + '</td>'
line = line + "</tr>"
print line
print "</table><br><br>"
print section_synopsis_footer
print description_header
print self.make_html_items( section.description )
print description_footer
def block_enter( self, block ):
print block_header
# place html anchor if needed
if block.name:
print '<h4><a name="' + block.name + '">' + block.name + '</a></h4>'
# dump the block C source lines now
if block.code:
header = ''
for f in self.headers.keys():
if block.source.filename.find( f ) >= 0:
header = self.headers[f] + ' (' + f + ')'
break;
# if not header:
# sys.stderr.write( \
# 'WARNING: No header macro for ' + block.source.filename + '.\n' )
if header:
print header_location_header
print 'Defined in ' + header + '.'
print header_location_footer
print source_header
for l in block.code:
print self.html_source_quote( l, block.name )
print source_footer
def markup_enter( self, markup, block ):
if markup.tag == "description":
print description_header
else:
print marker_header + markup.tag + marker_inter
self.print_html_markup( markup )
def markup_exit( self, markup, block ):
if markup.tag == "description":
print description_footer
else:
print marker_footer
def block_exit( self, block ):
print block_footer_start + self.file_prefix + "index.html" + \
block_footer_middle + self.file_prefix + "toc.html" + \
block_footer_end
def section_exit( self, section ):
print html_footer
def section_dump_all( self ):
for section in self.sections:
self.section_dump( section, self.file_prefix + section.name + '.html' )
# eof

View file

@ -1,132 +0,0 @@
# Utils (c) 2002, 2004, 2007, 2008 David Turner <david@freetype.org>
#
import string, sys, os, glob
# current output directory
#
output_dir = None
# This function is used to sort the index. It is a simple lexicographical
# sort, except that it places capital letters before lowercase ones.
#
def index_sort( s1, s2 ):
if not s1:
return -1
if not s2:
return 1
l1 = len( s1 )
l2 = len( s2 )
m1 = string.lower( s1 )
m2 = string.lower( s2 )
for i in range( l1 ):
if i >= l2 or m1[i] > m2[i]:
return 1
if m1[i] < m2[i]:
return -1
if s1[i] < s2[i]:
return -1
if s1[i] > s2[i]:
return 1
if l2 > l1:
return -1
return 0
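# A minimal usage sketch (Python 2, names illustrative): capital letters
# sort before their lowercase counterparts.
#
#   names = [ "alpha", "Alpha", "beta" ]
#   names.sort( index_sort )
#   # -> [ 'Alpha', 'alpha', 'beta' ]
#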
# Return a copy of order_list, followed by those elements of input_list
# that it does not already contain; i.e., the ordered elements are
# placed in front.
#
def sort_order_list( input_list, order_list ):
new_list = order_list[:]
for id in input_list:
if not id in order_list:
new_list.append( id )
return new_list
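# A minimal sketch of the resulting order (values illustrative):
#
#   sort_order_list( [ "basic", "cache", "auto" ], [ "cache" ] )
#   # -> [ 'cache', 'basic', 'auto' ]
#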
# Open the standard output to a given project documentation file. Use
# "output_dir" to determine the filename location if necessary and save the
# old stdout in a tuple that is returned by this function.
#
def open_output( filename ):
global output_dir
if output_dir and output_dir != "":
filename = output_dir + os.sep + filename
old_stdout = sys.stdout
new_file = open( filename, "w" )
sys.stdout = new_file
return ( new_file, old_stdout )
# Close the output that was returned by "open_output".
#
def close_output( output ):
output[0].close()
sys.stdout = output[1]
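# A minimal usage sketch (filename illustrative), pairing open_output
# with close_output so that sys.stdout is always restored:
#
#   output = open_output( "index.html" )
#   try:
#       print "<html></html>"
#   finally:
#       close_output( output )
#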
# Check output directory.
#
def check_output():
global output_dir
if output_dir:
if output_dir != "":
if not os.path.isdir( output_dir ):
sys.stderr.write( "argument" + " '" + output_dir + "' " + \
"is not a valid directory" )
sys.exit( 2 )
else:
output_dir = None
def file_exists( pathname ):
"""checks that a given file exists"""
result = 1
try:
file = open( pathname, "r" )
file.close()
except:
result = None
sys.stderr.write( pathname + " couldn't be accessed\n" )
return result
def make_file_list( args = None ):
"""builds a list of input files from command-line arguments"""
file_list = []
# sys.stderr.write( repr( sys.argv[1 :] ) + '\n' )
if not args:
args = sys.argv[1 :]
for pathname in args:
if string.find( pathname, '*' ) >= 0:
newpath = glob.glob( pathname )
newpath.sort()  # sort the expanded list to make the
                # processing order deterministic
else:
newpath = [pathname]
file_list.extend( newpath )
if len( file_list ) == 0:
file_list = None
else:
# now filter the file list to remove non-existing ones
file_list = filter( file_exists, file_list )
return file_list
# eof

View file

@ -1,35 +0,0 @@
# TOP_DIR and OBJ_DIR should be set by the user to the right directories,
# if necessary.
TOP_DIR ?= ../../..
OBJ_DIR ?= $(TOP_DIR)/objs
# The setup below is for gcc on a Unix-like platform.
SRC_DIR = $(TOP_DIR)/src/tools/ftrandom
CC = gcc
WFLAGS = -Wmissing-prototypes \
-Wunused \
-Wimplicit \
-Wreturn-type \
-Wparentheses \
-pedantic \
-Wformat \
-Wchar-subscripts \
-Wsequence-point
CFLAGS = $(WFLAGS) \
-g \
-I $(TOP_DIR)/include
LIBS = -lm \
-L $(OBJ_DIR) \
-lfreetype \
-lz
all: $(OBJ_DIR)/ftrandom
$(OBJ_DIR)/ftrandom: $(SRC_DIR)/ftrandom.c $(OBJ_DIR)/libfreetype.a
$(CC) -o $(OBJ_DIR)/ftrandom $(CFLAGS) $(SRC_DIR)/ftrandom.c $(LIBS)
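# Example invocation (paths illustrative; libfreetype.a must already
# exist in $(OBJ_DIR)):
#
#   make TOP_DIR=../../.. OBJ_DIR=../../../objs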
# EOF

View file

@ -1,48 +0,0 @@
ftrandom
--------
This program expects a set of directories containing known-good fonts and a
set of file extensions for the fonts to be tested. It randomly picks a
font, copies it, introduces an error into the copy, and then tests it.
The FreeType tests are quite basic:
For each erroneous font it
forks off a new tester;
initializes the library;
opens each font in the file;
loads each glyph;
(optionally reviewing the contours of the glyph)
(optionally rasterizing)
closes the face.
If the tester exits with a signal, or takes longer than 20 seconds, then
ftrandom saves the erroneous font and continues. If the tester exits
normally or with an error, then the superstructure removes the test font
and continues.
Arguments are:
--all Test every font in the directory(ies) no matter
what its extension (some CID-keyed fonts have no
extension).
--check-outlines Call FT_Outline_Decompose on each glyph.
--dir <dir> Append <dir> to the list of directories to search
for good fonts.
--error-count <cnt> Introduce <cnt> single-byte errors into the
erroneous fonts.
--error-fraction <frac> Multiply the file size of the font by <frac> and
introduce that many errors into the erroneous
font file.
--ext <ext> Add <ext> to the set of font types tested. Known
extensions are `ttf', `otf', `ttc', `cid', `pfb',
`pfa', `bdf', `pcf', `pfr', `fon', `otb', and
`cff'.
--help Print out this list of options.
--nohints Specify FT_LOAD_NO_HINTING when loading glyphs.
--rasterize Call FT_Render_Glyph as well as loading it.
--results <dir>         This is the directory in which the test files
                        are placed.
--test <file> Run a single test on a pre-generated testcase.
Done in the current process so it can be debugged
more easily.
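An illustrative invocation (directory names are examples only):

  ftrandom --check-outlines --nohints \
           --dir /usr/share/fonts/truetype \
           --results /tmp/ftrandom-out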

View file

@ -1,659 +0,0 @@
/* Copyright (C) 2005, 2007, 2008 by George Williams */
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* modified by Werner Lemberg <wl@gnu.org> */
/* This file is now part of the FreeType library */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <unistd.h>
#include <dirent.h>
#include <math.h>
#include <signal.h>
#include <time.h>
#include <ft2build.h>
#include FT_FREETYPE_H
#include FT_OUTLINE_H
#define true 1
#define false 0
#define forever for (;;)
static int check_outlines = false;
static int nohints = false;
static int rasterize = false;
static char* results_dir = "results";
#define GOOD_FONTS_DIR "/home/wl/freetype-testfonts"
static char* default_dir_list[] =
{
GOOD_FONTS_DIR,
NULL
};
static char* default_ext_list[] =
{
"ttf",
"otf",
"ttc",
"cid",
"pfb",
"pfa",
"bdf",
"pcf",
"pfr",
"fon",
"otb",
"cff",
NULL
};
static int error_count = 1;
static double error_fraction = 0.0;
static FT_F26Dot6 font_size = 12 * 64;
static struct fontlist
{
char* name;
int len;
unsigned int isbinary: 1;
unsigned int isascii: 1;
unsigned int ishex: 1;
} *fontlist;
static int fcnt;
static int
FT_MoveTo( const FT_Vector *to,
void *user )
{
return 0;
}
static int
FT_LineTo( const FT_Vector *to,
void *user )
{
return 0;
}
static int
FT_ConicTo( const FT_Vector *_cp,
const FT_Vector *to,
void *user )
{
return 0;
}
static int
FT_CubicTo( const FT_Vector *cp1,
const FT_Vector *cp2,
const FT_Vector *to,
void *user )
{
return 0;
}
static FT_Outline_Funcs outlinefuncs =
{
FT_MoveTo,
FT_LineTo,
FT_ConicTo,
FT_CubicTo,
0, 0 /* No shift, no delta */
};
static void
TestFace( FT_Face face )
{
int gid;
int load_flags = FT_LOAD_DEFAULT;
if ( check_outlines &&
FT_IS_SCALABLE( face ) )
load_flags = FT_LOAD_NO_BITMAP;
if ( nohints )
load_flags |= FT_LOAD_NO_HINTING;
FT_Set_Char_Size( face, 0, font_size, 72, 72 );
for ( gid = 0; gid < face->num_glyphs; ++gid )
{
if ( check_outlines &&
FT_IS_SCALABLE( face ) )
{
if ( !FT_Load_Glyph( face, gid, load_flags ) )
FT_Outline_Decompose( &face->glyph->outline, &outlinefuncs, NULL );
}
else
FT_Load_Glyph( face, gid, load_flags );
if ( rasterize )
FT_Render_Glyph( face->glyph, ft_render_mode_normal );
}
FT_Done_Face( face );
}
static void
ExecuteTest( char* testfont )
{
FT_Library context;
FT_Face face;
int i, num;
if ( FT_Init_FreeType( &context ) )
{
fprintf( stderr, "Can't initialize FreeType.\n" );
exit( 1 );
}
if ( FT_New_Face( context, testfont, 0, &face ) )
{
/* The font is erroneous, so if this fails that's ok. */
exit( 0 );
}
if ( face->num_faces == 1 )
TestFace( face );
else
{
num = face->num_faces;
FT_Done_Face( face );
for ( i = 0; i < num; ++i )
{
if ( !FT_New_Face( context, testfont, i, &face ) )
TestFace( face );
}
}
exit( 0 );
}
static int
extmatch( char* filename,
char** extensions )
{
int i;
char* pt;
if ( extensions == NULL )
return true;
pt = strrchr( filename, '.' );
if ( pt == NULL )
return false;
if ( pt < strrchr( filename, '/' ) )
return false;
for ( i = 0; extensions[i] != NULL; ++i )
if ( strcasecmp( pt + 1, extensions[i] ) == 0 ||
strcasecmp( pt, extensions[i] ) == 0 )
return true;
return false;
}
static void
figurefiletype( struct fontlist* item )
{
FILE* foo;
item->isbinary = item->isascii = item->ishex = false;
foo = fopen( item->name, "rb" );
if ( foo != NULL )
{
/* Try to guess the file type from the first few characters... */
int ch1 = getc( foo );
int ch2 = getc( foo );
int ch3 = getc( foo );
int ch4 = getc( foo );
fclose( foo );
if ( ( ch1 == 0 && ch2 == 1 && ch3 == 0 && ch4 == 0 ) ||
( ch1 == 'O' && ch2 == 'T' && ch3 == 'T' && ch4 == 'O' ) ||
( ch1 == 't' && ch2 == 'r' && ch3 == 'u' && ch4 == 'e' ) ||
( ch1 == 't' && ch2 == 't' && ch3 == 'c' && ch4 == 'f' ) )
{
/* ttf, otf, ttc files */
item->isbinary = true;
}
else if ( ch1 == 0x80 && ch2 == '\01' )
{
/* PFB header */
item->isbinary = true;
}
else if ( ch1 == '%' && ch2 == '!' )
{
/* Random PostScript */
if ( strstr( item->name, ".pfa" ) != NULL ||
strstr( item->name, ".PFA" ) != NULL )
item->ishex = true;
else
item->isascii = true;
}
else if ( ch1 == 1 && ch2 == 0 && ch3 == 4 )
{
/* Bare CFF */
item->isbinary = true;
}
else if ( ch1 == 'S' && ch2 == 'T' && ch3 == 'A' && ch4 == 'R' )
{
/* BDF */
item->ishex = true;
}
else if ( ch1 == 'P' && ch2 == 'F' && ch3 == 'R' && ch4 == '0' )
{
/* PFR */
item->isbinary = true;
}
else if ( ( ch1 == '\1' && ch2 == 'f' && ch3 == 'c' && ch4 == 'p' ) ||
( ch1 == 'M' && ch2 == 'Z' ) )
{
/* Windows FON */
item->isbinary = true;
}
else
{
fprintf( stderr,
"Can't recognize file type of `%s', assuming binary\n",
item->name );
item->isbinary = true;
}
}
else
{
fprintf( stderr, "Can't open `%s' for typing the file.\n",
item->name );
item->isbinary = true;
}
}
static void
FindFonts( char** fontdirs,
char** extensions )
{
DIR* examples;
struct dirent* ent;
int i, max;
char buffer[1025];
struct stat statb;
max = 0;
fcnt = 0;
for ( i = 0; fontdirs[i] != NULL; ++i )
{
examples = opendir( fontdirs[i] );
if ( examples == NULL )
{
fprintf( stderr,
"Can't open example font directory `%s'\n",
fontdirs[i] );
exit( 1 );
}
while ( ( ent = readdir( examples ) ) != NULL )
{
snprintf( buffer, sizeof ( buffer ),
"%s/%s", fontdirs[i], ent->d_name );
if ( stat( buffer, &statb ) == -1 || S_ISDIR( statb.st_mode ) )
continue;
if ( extensions == NULL || extmatch( buffer, extensions ) )
{
        if ( fcnt + 1 >= max )  /* keep room for the final NULL entry */
{
max += 100;
fontlist = realloc( fontlist, max * sizeof ( struct fontlist ) );
if ( fontlist == NULL )
{
fprintf( stderr, "Can't allocate memory\n" );
exit( 1 );
}
}
fontlist[fcnt].name = strdup( buffer );
fontlist[fcnt].len = statb.st_size;
figurefiletype( &fontlist[fcnt] );
++fcnt;
}
}
closedir( examples );
}
if ( fcnt == 0 )
{
fprintf( stderr, "Can't find matching font files.\n" );
exit( 1 );
}
fontlist[fcnt].name = NULL;
}
static int
getErrorCnt( struct fontlist* item )
{
if ( error_count == 0 && error_fraction == 0 )
return 0;
return error_count + ceil( error_fraction * item->len );
}
static int
getRandom( int low,
int high )
{
    /* for narrow ranges, use only the higher-order bits of random() */
    if ( high - low < 0x10000L )
      return low + ( ( random() >> 8 ) % ( high + 1 - low ) );
    return low + ( random() % ( high + 1 - low ) );
}
static int
copyfont( struct fontlist* item,
char* newfont )
{
static char buffer[8096];
FILE *good, *new;
int len;
int i, err_cnt;
good = fopen( item->name, "r" );
if ( good == NULL )
{
fprintf( stderr, "Can't open `%s'\n", item->name );
return false;
}
new = fopen( newfont, "w+" );
if ( new == NULL )
{
fprintf( stderr, "Can't create temporary output file `%s'\n",
newfont );
exit( 1 );
}
while ( ( len = fread( buffer, 1, sizeof ( buffer ), good ) ) > 0 )
fwrite( buffer, 1, len, new );
fclose( good );
err_cnt = getErrorCnt( item );
for ( i = 0; i < err_cnt; ++i )
{
fseek( new, getRandom( 0, item->len - 1 ), SEEK_SET );
if ( item->isbinary )
putc( getRandom( 0, 0xff ), new );
else if ( item->isascii )
putc( getRandom( 0x20, 0x7e ), new );
else
{
int hex = getRandom( 0, 15 );
if ( hex < 10 )
hex += '0';
else
hex += 'A' - 10;
putc( hex, new );
}
}
if ( ferror( new ) )
{
fclose( new );
unlink( newfont );
return false;
}
fclose( new );
return true;
}
static int child_pid;
static void
abort_test( int sig )
{
/* If a time-out happens, then kill the child */
kill( child_pid, SIGFPE );
write( 2, "Timeout... ", 11 );
}
static void
do_test( void )
{
int i = getRandom( 0, fcnt - 1 );
static int test_num = 0;
char buffer[1024];
sprintf( buffer, "%s/test%d", results_dir, test_num++ );
if ( copyfont ( &fontlist[i], buffer ) )
{
signal( SIGALRM, abort_test );
/* Anything that takes more than 20 seconds */
/* to parse and/or rasterize is an error. */
alarm( 20 );
if ( ( child_pid = fork() ) == 0 )
ExecuteTest( buffer );
else if ( child_pid != -1 )
{
int status;
waitpid( child_pid, &status, 0 );
alarm( 0 );
if ( WIFSIGNALED ( status ) )
printf( "Error found in file `%s'\n", buffer );
else
unlink( buffer );
}
else
{
fprintf( stderr, "Can't fork test case.\n" );
exit( 1 );
}
alarm( 0 );
}
}
static void
usage( FILE* out,
char* name )
{
fprintf( out, "%s [options] -- Generate random erroneous fonts\n"
" and attempt to parse them with FreeType.\n\n", name );
fprintf( out, " --all All non-directory files are assumed to be fonts.\n" );
fprintf( out, " --check-outlines Make sure we can parse the outlines of each glyph.\n" );
fprintf( out, " --dir <path> Append <path> to list of font search directories.\n" );
fprintf( out, " --error-count <cnt> Introduce <cnt> single byte errors into each font.\n" );
fprintf( out, " --error-fraction <frac> Introduce <frac>*filesize single byte errors\n"
" into each font.\n" );
fprintf( out, " --ext <ext> Add <ext> to list of extensions indicating fonts.\n" );
fprintf( out, " --help Print this.\n" );
fprintf( out, " --nohints Turn off hinting.\n" );
fprintf( out, " --rasterize Attempt to rasterize each glyph.\n" );
fprintf( out, " --results <dir> Directory in which to place the test fonts.\n" );
fprintf( out, " --size <float> Use the given font size for the tests.\n" );
fprintf( out, " --test <file> Run a single test on an already existing file.\n" );
}
int
main( int argc,
char** argv )
{
char **dirs, **exts;
char *pt, *end;
int dcnt = 0, ecnt = 0, rset = false, allexts = false;
int i;
time_t now;
char* testfile = NULL;
dirs = calloc( argc + 1, sizeof ( char ** ) );
exts = calloc( argc + 1, sizeof ( char ** ) );
for ( i = 1; i < argc; ++i )
{
pt = argv[i];
if ( pt[0] == '-' && pt[1] == '-' )
++pt;
if ( strcmp( pt, "-all" ) == 0 )
allexts = true;
else if ( strcmp( pt, "-check-outlines" ) == 0 )
check_outlines = true;
else if ( strcmp( pt, "-dir" ) == 0 )
dirs[dcnt++] = argv[++i];
else if ( strcmp( pt, "-error-count" ) == 0 )
{
if ( !rset )
error_fraction = 0;
rset = true;
error_count = strtol( argv[++i], &end, 10 );
if ( *end != '\0' )
{
fprintf( stderr, "Bad value for error-count: %s\n", argv[i] );
exit( 1 );
}
}
else if ( strcmp( pt, "-error-fraction" ) == 0 )
{
if ( !rset )
error_count = 0;
rset = true;
error_fraction = strtod( argv[++i], &end );
if ( *end != '\0' )
{
fprintf( stderr, "Bad value for error-fraction: %s\n", argv[i] );
exit( 1 );
}
}
else if ( strcmp( pt, "-ext" ) == 0 )
exts[ecnt++] = argv[++i];
else if ( strcmp( pt, "-help" ) == 0 )
{
usage( stdout, argv[0] );
exit( 0 );
}
else if ( strcmp( pt, "-nohints" ) == 0 )
nohints = true;
else if ( strcmp( pt, "-rasterize" ) == 0 )
rasterize = true;
else if ( strcmp( pt, "-results" ) == 0 )
results_dir = argv[++i];
else if ( strcmp( pt, "-size" ) == 0 )
{
font_size = (FT_F26Dot6)( strtod( argv[++i], &end ) * 64 );
if ( *end != '\0' || font_size < 64 )
{
fprintf( stderr, "Bad value for size: %s\n", argv[i] );
exit( 1 );
}
}
else if ( strcmp( pt, "-test" ) == 0 )
testfile = argv[++i];
else
{
usage( stderr, argv[0] );
exit( 1 );
}
}
if ( allexts )
exts = NULL;
else if ( ecnt == 0 )
exts = default_ext_list;
if ( dcnt == 0 )
dirs = default_dir_list;
if ( testfile != NULL )
ExecuteTest( testfile ); /* This should never return */
time( &now );
srandom( now );
FindFonts( dirs, exts );
mkdir( results_dir, 0755 );
forever
do_test();
return 0;
}
/* EOF */

File diff suppressed because it is too large

View file

@ -1,157 +0,0 @@
/*
* gcc -DFT2_BUILD_LIBRARY -I../../include -o test_afm test_afm.c \
* -L../../objs/.libs -lfreetype -lz -static
*/
#include <ft2build.h>
#include FT_FREETYPE_H
#include FT_INTERNAL_STREAM_H
#include FT_INTERNAL_POSTSCRIPT_AUX_H
void dump_fontinfo( AFM_FontInfo fi )
{
FT_Int i;
printf( "This AFM is for %sCID font.\n\n",
( fi->IsCIDFont ) ? "" : "non-" );
printf( "FontBBox: %.2f %.2f %.2f %.2f\n", fi->FontBBox.xMin / 65536.,
fi->FontBBox.yMin / 65536.,
fi->FontBBox.xMax / 65536.,
fi->FontBBox.yMax / 65536. );
printf( "Ascender: %.2f\n", fi->Ascender / 65536. );
printf( "Descender: %.2f\n\n", fi->Descender / 65536. );
if ( fi->NumTrackKern )
printf( "There are %d sets of track kernings:\n",
fi->NumTrackKern );
else
printf( "There is no track kerning.\n" );
for ( i = 0; i < fi->NumTrackKern; i++ )
{
AFM_TrackKern tk = fi->TrackKerns + i;
printf( "\t%2d: %5.2f %5.2f %5.2f %5.2f\n", tk->degree,
tk->min_ptsize / 65536.,
tk->min_kern / 65536.,
tk->max_ptsize / 65536.,
tk->max_kern / 65536. );
}
printf( "\n" );
if ( fi->NumKernPair )
printf( "There are %d kerning pairs:\n",
fi->NumKernPair );
else
printf( "There is no kerning pair.\n" );
for ( i = 0; i < fi->NumKernPair; i++ )
{
AFM_KernPair kp = fi->KernPairs + i;
printf( "\t%3d + %3d => (%4d, %4d)\n", kp->index1,
kp->index2,
kp->x,
kp->y );
}
}
int
dummy_get_index( const char* name,
FT_Offset len,
void* user_data )
{
if ( len )
return name[0];
else
return 0;
}
FT_Error
parse_afm( FT_Library library,
FT_Stream stream,
AFM_FontInfo fi )
{
PSAux_Service psaux;
AFM_ParserRec parser;
FT_Error error = FT_Err_Ok;
psaux = (PSAux_Service)FT_Get_Module_Interface( library, "psaux" );
if ( !psaux || !psaux->afm_parser_funcs )
return -1;
error = FT_Stream_EnterFrame( stream, stream->size );
if ( error )
return error;
error = psaux->afm_parser_funcs->init( &parser,
library->memory,
stream->cursor,
stream->limit );
if ( error )
return error;
parser.FontInfo = fi;
parser.get_index = dummy_get_index;
error = psaux->afm_parser_funcs->parse( &parser );
psaux->afm_parser_funcs->done( &parser );
return error;
}
int main( int argc,
char** argv )
{
FT_Library library;
FT_StreamRec stream;
FT_Error error = FT_Err_Ok;
AFM_FontInfoRec fi;
if ( argc < 2 )
return FT_Err_Invalid_Argument;
error = FT_Init_FreeType( &library );
if ( error )
return error;
FT_ZERO( &stream );
error = FT_Stream_Open( &stream, argv[1] );
if ( error )
goto Exit;
stream.memory = library->memory;
FT_ZERO( &fi );
error = parse_afm( library, &stream, &fi );
if ( !error )
{
FT_Memory memory = library->memory;
dump_fontinfo( &fi );
if ( fi.KernPairs )
FT_FREE( fi.KernPairs );
if ( fi.TrackKerns )
FT_FREE( fi.TrackKerns );
}
else
printf( "parse error\n" );
FT_Stream_Close( &stream );
Exit:
FT_Done_FreeType( library );
return error;
}

View file

@ -1,160 +0,0 @@
#include <ft2build.h>
#include FT_FREETYPE_H
#include FT_BBOX_H
#include <time.h> /* for clock() */
/* SunOS 4.1.* does not define CLOCKS_PER_SEC, so include <sys/param.h> */
/* to get the HZ macro which is the equivalent. */
#if defined(__sun__) && !defined(SVR4) && !defined(__SVR4)
#include <sys/param.h>
#define CLOCKS_PER_SEC HZ
#endif
static long
get_time( void )
{
return clock() * 10000L / CLOCKS_PER_SEC;
}
/* test bbox computations */
#define XSCALE 65536
#define XX(x) ((FT_Pos)((x)*XSCALE))
#define XVEC(x,y) { XX(x), XX(y) }
#define XVAL(x) ((x)/(1.0*XSCALE))
/* dummy outline #1 */
static FT_Vector dummy_vec_1[4] =
{
#if 1
XVEC( 408.9111, 535.3164 ),
XVEC( 455.8887, 634.396 ),
XVEC( -37.8765, 786.2207 ),
XVEC( 164.6074, 535.3164 )
#else
{ (FT_Int32)0x0198E93DL , (FT_Int32)0x021750FFL }, /* 408.9111, 535.3164 */
{ (FT_Int32)0x01C7E312L , (FT_Int32)0x027A6560L }, /* 455.8887, 634.3960 */
{ (FT_Int32)0xFFDA1F9EL , (FT_Int32)0x0312387FL }, /* -37.8765, 786.2207 */
{ (FT_Int32)0x00A49B7EL , (FT_Int32)0x021750FFL } /* 164.6074, 535.3164 */
#endif
};
static char dummy_tag_1[4] =
{
FT_CURVE_TAG_ON,
FT_CURVE_TAG_CUBIC,
FT_CURVE_TAG_CUBIC,
FT_CURVE_TAG_ON
};
static short dummy_contour_1[1] =
{
3
};
static FT_Outline dummy_outline_1 =
{
1,
4,
dummy_vec_1,
dummy_tag_1,
dummy_contour_1,
0
};
/* dummy outline #2 */
static FT_Vector dummy_vec_2[4] =
{
XVEC( 100.0, 100.0 ),
XVEC( 100.0, 200.0 ),
XVEC( 200.0, 200.0 ),
XVEC( 200.0, 133.0 )
};
static FT_Outline dummy_outline_2 =
{
1,
4,
dummy_vec_2,
dummy_tag_1,
dummy_contour_1,
0
};
static void
dump_outline( FT_Outline* outline )
{
FT_BBox bbox;
/* compute and display cbox */
FT_Outline_Get_CBox( outline, &bbox );
printf( "cbox = [%.2f %.2f %.2f %.2f]\n",
XVAL( bbox.xMin ),
XVAL( bbox.yMin ),
XVAL( bbox.xMax ),
XVAL( bbox.yMax ) );
/* compute and display bbox */
FT_Outline_Get_BBox( outline, &bbox );
printf( "bbox = [%.2f %.2f %.2f %.2f]\n",
XVAL( bbox.xMin ),
XVAL( bbox.yMin ),
XVAL( bbox.xMax ),
XVAL( bbox.yMax ) );
}
static void
profile_outline( FT_Outline* outline,
long repeat )
{
FT_BBox bbox;
long count;
long time0;
time0 = get_time();
for ( count = repeat; count > 0; count-- )
FT_Outline_Get_CBox( outline, &bbox );
time0 = get_time() - time0;
printf( "time = %5.2f cbox = [%.2f %.2f %.2f %.2f]\n",
((double)time0/10000.0),
XVAL( bbox.xMin ),
XVAL( bbox.yMin ),
XVAL( bbox.xMax ),
XVAL( bbox.yMax ) );
time0 = get_time();
for ( count = repeat; count > 0; count-- )
FT_Outline_Get_BBox( outline, &bbox );
time0 = get_time() - time0;
printf( "time = %5.2f bbox = [%.2f %.2f %.2f %.2f]\n",
((double)time0/10000.0),
XVAL( bbox.xMin ),
XVAL( bbox.yMin ),
XVAL( bbox.xMax ),
XVAL( bbox.yMax ) );
}
#define REPEAT 100000L
int main( int argc, char** argv )
{
printf( "outline #1\n" );
profile_outline( &dummy_outline_1, REPEAT );
printf( "outline #2\n" );
profile_outline( &dummy_outline_2, REPEAT );
return 0;
}

View file

@ -1,236 +0,0 @@
#include <ft2build.h>
#include FT_FREETYPE_H
#include FT_TRIGONOMETRY_H
#include <math.h>
#include <stdio.h>
#define PI 3.14159265358979323846
#define SPI (PI/FT_ANGLE_PI)
/* The tolerance of the checks, in 16.16 fixed-point units; expect  */
/* between 2 and 5 noisy LSBs during the computations, due to       */
/* rounding errors. A threshold of 64 corresponds to 64/65536,      */
/* i.e., roughly 0.001.                                             */
#define THRESHOLD 64
static int error = 0;
static void
test_cos( void )
{
FT_Fixed f1, f2;
double d1, d2;
int i;
for ( i = 0; i < FT_ANGLE_2PI; i += 0x10000 )
{
f1 = FT_Cos(i);
d1 = f1/65536.0;
d2 = cos( i*SPI );
f2 = (FT_Fixed)(d2*65536.0);
if ( abs( f2-f1 ) > THRESHOLD )
{
error = 1;
printf( "FT_Cos[%3d] = %.7f cos[%3d] = %.7f\n",
(i >> 16), f1/65536.0, (i >> 16), d2 );
}
}
}
static void
test_sin( void )
{
FT_Fixed f1, f2;
double d1, d2;
int i;
for ( i = 0; i < FT_ANGLE_2PI; i += 0x10000 )
{
f1 = FT_Sin(i);
d1 = f1/65536.0;
d2 = sin( i*SPI );
f2 = (FT_Fixed)(d2*65536.0);
if ( abs( f2-f1 ) > THRESHOLD )
{
error = 1;
printf( "FT_Sin[%3d] = %.7f sin[%3d] = %.7f\n",
(i >> 16), f1/65536.0, (i >> 16), d2 );
}
}
}
static void
test_tan( void )
{
FT_Fixed f1, f2;
double d1, d2;
int i;
for ( i = 0; i < FT_ANGLE_PI2-0x2000000; i += 0x10000 )
{
f1 = FT_Tan(i);
d1 = f1/65536.0;
d2 = tan( i*SPI );
f2 = (FT_Fixed)(d2*65536.0);
if ( abs( f2-f1 ) > THRESHOLD )
{
error = 1;
printf( "FT_Tan[%3d] = %.7f tan[%3d] = %.7f\n",
(i >> 16), f1/65536.0, (i >> 16), d2 );
}
}
}
static void
test_atan2( void )
{
FT_Fixed c2, s2;
double l, a, c1, s1;
int i, j;
for ( i = 0; i < FT_ANGLE_2PI; i += 0x10000 )
{
l = 5.0;
a = i*SPI;
c1 = l * cos(a);
s1 = l * sin(a);
c2 = (FT_Fixed)(c1*65536.0);
s2 = (FT_Fixed)(s1*65536.0);
j = FT_Atan2( c2, s2 );
if ( j < 0 )
j += FT_ANGLE_2PI;
if ( abs( i - j ) > 1 )
{
printf( "FT_Atan2( %.7f, %.7f ) = %.5f, atan = %.5f\n",
c2/65536.0, s2/65536.0, j/65536.0, i/65536.0 );
}
}
}
static void
test_unit( void )
{
FT_Vector v;
double a, c1, s1;
FT_Fixed c2, s2;
int i;
for ( i = 0; i < FT_ANGLE_2PI; i += 0x10000 )
{
FT_Vector_Unit( &v, i );
a = ( i*SPI );
c1 = cos(a);
s1 = sin(a);
c2 = (FT_Fixed)(c1*65536.0);
s2 = (FT_Fixed)(s1*65536.0);
if ( abs( v.x-c2 ) > THRESHOLD ||
abs( v.y-s2 ) > THRESHOLD )
{
error = 1;
printf( "FT_Vector_Unit[%3d] = ( %.7f, %.7f ) vec = ( %.7f, %.7f )\n",
(i >> 16),
v.x/65536.0, v.y/65536.0,
c1, s1 );
}
}
}
static void
test_length( void )
{
FT_Vector v;
FT_Fixed l, l2;
int i;
for ( i = 0; i < FT_ANGLE_2PI; i += 0x10000 )
{
l = (FT_Fixed)(500.0*65536.0);
v.x = (FT_Fixed)( l * cos( i*SPI ) );
v.y = (FT_Fixed)( l * sin( i*SPI ) );
l2 = FT_Vector_Length( &v );
if ( abs( l2-l ) > THRESHOLD )
{
error = 1;
printf( "FT_Length( %.7f, %.7f ) = %.5f, length = %.5f\n",
v.x/65536.0, v.y/65536.0, l2/65536.0, l/65536.0 );
}
}
}
static void
test_rotate( void )
{
FT_Fixed c2, s2, c4, s4;
FT_Vector v;
double l, ra, a, c1, s1, cra, sra, c3, s3;
int i, j, rotate;
for ( rotate = 0; rotate < FT_ANGLE_2PI; rotate += 0x10000 )
{
ra = rotate*SPI;
cra = cos( ra );
sra = sin( ra );
for ( i = 0; i < FT_ANGLE_2PI; i += 0x10000 )
{
l = 500.0;
a = i*SPI;
c1 = l * cos(a);
s1 = l * sin(a);
v.x = c2 = (FT_Fixed)(c1*65536.0);
v.y = s2 = (FT_Fixed)(s1*65536.0);
FT_Vector_Rotate( &v, rotate );
c3 = c1 * cra - s1 * sra;
s3 = c1 * sra + s1 * cra;
c4 = (FT_Fixed)(c3*65536.0);
s4 = (FT_Fixed)(s3*65536.0);
if ( abs( c4 - v.x ) > THRESHOLD ||
abs( s4 - v.y ) > THRESHOLD )
{
error = 1;
printf( "FT_Rotate( (%.7f,%.7f), %.5f ) = ( %.7f, %.7f ), rot = ( %.7f, %.7f )\n",
c1, s1, ra,
c2/65536.0, s2/65536.0,
c4/65536.0, s4/65536.0 );
}
}
}
}
int main( void )
{
test_cos();
test_sin();
test_tan();
test_atan2();
test_unit();
test_length();
test_rotate();
if (!error)
printf( "trigonometry test ok !\n" );
return !error;
}