Sitemap

Thursday, June 25, 2015

Java: Input and Output streams

java.io package provides I/O classes to manipulate streams. This package supports two types of streams:
1. binary streams which handle binary data. InputStream and OutputStream are high level interfaces for manipulating binary streams.
2. character streams which handle character data. Reader and Writer are high level interfaces for manipulating character streams. In this section, the main focus is on binary streams.

By default, most of the streams read or write one byte at a time. This causes poor I/O performance because it takes a lot of time to read/write byte by byte when dealing with large amounts of data. The java.io package provides buffered streams to override this byte-by-byte default behavior.



import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

public class Main {

    /** Source file copied three times by the timing comparison in main(). */
    private static final String SOURCE_FILE = "D:\\test.jar";

    /** Chunk size for readWriteArray(); 8 KB is a conventional I/O buffer size. */
    private static final int BUFFER_SIZE = 8192;

    public static void main(String[] args) {
        Main io = new Main();
        try {
            long startTime = System.currentTimeMillis();
            io.readWrite(SOURCE_FILE, "D:\\test1.jar");
            long endTime = System.currentTimeMillis();
            System.out.println("Time taken for reading and writing using default behaviour : " + (endTime - startTime) +
                               " milli seconds");

            long startTime1 = System.currentTimeMillis();
            io.readWriteBuffer(SOURCE_FILE, "D:\\test2.jar");
            long endTime1 = System.currentTimeMillis();
            System.out.println("Time taken for reading and writing using buffered streams : " +
                               (endTime1 - startTime1) + " milli seconds");

            long startTime2 = System.currentTimeMillis();
            io.readWriteArray(SOURCE_FILE, "D:\\test3.jar");
            long endTime2 = System.currentTimeMillis();
            System.out.println("Time taken for reading and writing using custom buffering : " +
                               (endTime2 - startTime2) + " milli seconds");
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Copies a file one byte at a time using unbuffered streams (the slow baseline).
     *
     * @param fileFrom path of the source file
     * @param fileTo   path of the destination file (created/truncated)
     * @throws IOException if either file cannot be opened, read, or written
     */
    public static void readWrite(String fileFrom, String fileTo) throws IOException {
        // try-with-resources closes BOTH streams even if one close() throws;
        // the original finally block leaked 'out' when in.close() failed.
        try (InputStream in = new FileInputStream(fileFrom);
             OutputStream out = new FileOutputStream(fileTo)) {
            int bytedata;
            while ((bytedata = in.read()) != -1) {
                out.write(bytedata);
            }
        }
    }

    /**
     * Copies a file one byte at a time through buffered streams.
     *
     * @param fileFrom path of the source file
     * @param fileTo   path of the destination file (created/truncated)
     * @throws IOException if either file cannot be opened, read, or written
     */
    public static void readWriteBuffer(String fileFrom, String fileTo) throws IOException {
        try (InputStream inBuffer = new BufferedInputStream(new FileInputStream(fileFrom));
             OutputStream outBuffer = new BufferedOutputStream(new FileOutputStream(fileTo))) {
            int bytedata;
            // BUG FIX: the original wrote to the raw FileOutputStream ('out')
            // instead of the BufferedOutputStream, defeating output buffering.
            while ((bytedata = inBuffer.read()) != -1) {
                outBuffer.write(bytedata);
            }
        }
    }

    /**
     * Copies a file using an explicit byte-array buffer ("custom buffering").
     *
     * @param fileFrom path of the source file
     * @param fileTo   path of the destination file (created/truncated)
     * @throws IOException if either file cannot be opened, read, or written
     */
    public static void readWriteArray(String fileFrom, String fileTo) throws IOException {
        // BUG FIX: the original sized one big array with in.available(), which is
        // only an estimate of what can be read without blocking, and it ignored the
        // count returned by read() — either can silently corrupt or truncate the
        // copy. A fixed-size chunk loop honouring the read count is always correct.
        try (InputStream in = new FileInputStream(fileFrom);
             OutputStream out = new FileOutputStream(fileTo)) {
            byte[] buffer = new byte[BUFFER_SIZE];
            int count;
            while ((count = in.read(buffer)) != -1) {
                out.write(buffer, 0, count);
            }
        }
    }
}


OUTPUT
Time taken for reading and writing using default behaviour : 5188 milli seconds
Time taken for reading and writing using buffered streams : 3105 milli seconds
Time taken for reading and writing using custom buffering : 7 milli seconds

Java: Properties Class

A Properties object is a persistent Hashtable that stores key–value pairs of Strings. By "persistent", we mean that the Properties object can be written to an output stream (possibly a file) and read back in through an input stream. A common use of Properties objects in prior versions of Java was to maintain application-configuration data or user preferences for applications.

import java.io.FileOutputStream;
import java.io.FileInputStream;
import java.io.IOException;

import java.util.Properties;
import java.util.Set;

public class PropertiesTest {
    public static void main(String[] args) {
        Properties table = new Properties();

        // set properties
        table.setProperty("color", "blue");
        table.setProperty("width", "200");

        System.out.println("After setting properties");
        listProperties(table);

        // replace property value
        table.setProperty("color", "red");

        System.out.println("After replacing properties");
        listProperties(table);

        saveProperties(table);

        table.clear(); // empty table

        System.out.println("After clearing properties");
        listProperties(table);

        loadProperties(table);

        // get value of property color
        Object value = table.getProperty("color");

        // check if value is in table
        if (value != null)
            System.out.printf("Property color's value is %s%n", value);
        else
            System.out.println("Property color is not in table");
    }

    // save properties to a file

    /**
     * Persists the given Properties to "props.dat" in the working directory.
     * I/O errors are reported to stderr, not propagated.
     */
    private static void saveProperties(Properties props) {
        // try-with-resources closes the stream even if store() throws;
        // the original leaked the FileOutputStream on failure.
        try (FileOutputStream output = new FileOutputStream("props.dat")) {
            props.store(output, "Sample Properties"); // save properties
            System.out.println("After saving properties");
            listProperties(props);
        } catch (IOException ioException) {
            ioException.printStackTrace();
        }
    }

    // load properties from a file

    /**
     * Loads key/value pairs from "props.dat" into the given Properties.
     * I/O errors are reported to stderr, not propagated.
     */
    private static void loadProperties(Properties props) {
        try (FileInputStream input = new FileInputStream("props.dat")) {
            props.load(input); // load properties
            System.out.println("After loading properties");
            listProperties(props);
        } catch (IOException ioException) {
            ioException.printStackTrace();
        }
    }

    // output property values

    /** Prints every name/value pair in the table, one per line, then a blank line. */
    private static void listProperties(Properties props) {
        // BUG FIX: the original declared Set<object> (lowercase 'o'), which does
        // not compile. stringPropertyNames() returns Set<String> and removes the
        // need for the (String) cast.
        Set<String> keys = props.stringPropertyNames(); // get property names

        // output name/value pairs
        for (String key : keys)
            System.out.printf("%s\t%s%n", key, props.getProperty(key));

        System.out.println();
    }
}


OUTPUT
After setting properties
color blue
width 200

After replacing properties
color red
width 200

After saving properties
color red
width 200

After clearing properties

After loading properties
color red
width 200

Property color's value is red


Java: Apache Ant

build.properties
src.dir=src
classes.dir=classes
main-class=com.mypkg.PortfolioManager
lib.dir=lib
docs.dir=docs
projectName=AntTutorial

build.xml
<?xml version="1.0" encoding="windows-1252" ?>
<!--Ant buildfile generated by Oracle JDeveloper-->
<!--Generated Apr 20, 2015 4:09:46 PM-->
<project xmlns="antlib:org.apache.tools.ant" name="Project" default="all" basedir=".">
  <property file="build.properties"/>

  <target name="clean">
    <delete dir="${classes.dir}"/>
    <delete dir="${docs.dir}"/>
  </target>

  <target name="init">
    <mkdir dir="${classes.dir}"/>
    <!--<mkdir dir="${docs.dir}"/>-->
  </target>

  <path id="classpath">
    <fileset dir="${lib.dir}" includes="**/*.jar"/>
  </path>

  <target name="compile" depends="init">
    <javac srcdir="${src.dir}" destdir="${classes.dir}" classpathref="classpath"/>
  </target>

  <!--<target name="docs" depends="compile">
    <javadoc packagenames="src" sourcepath="${src.dir}" destdir="${docs.dir}">
       <fileset dir="${src.dir}">
                <include name="**" />
           </fileset>
    </javadoc>
  </target>-->

  <target name="jar" depends="compile">
    <jar destfile="${projectName}.jar" basedir="${classes.dir}">
      <manifest>
        <attribute name="Main-Class" value="${main-class}"/>
      </manifest>
    </jar>
  </target>

  <target name="main" depends="clean,compile,jar"/>
</project>

Download the sample project from the file cabinet: AntTut.zip
More Info: http://www.tutorialspoint.com/ant/index.htm

Java: Dynamic Polymorphism

Polymorphism in Java has two types:
1. Compile time polymorphism (static binding)
2. Runtime polymorphism (dynamic binding).

Method overloading is an example of static polymorphism, while method overriding is an example of dynamic polymorphism.

/** Base type for the dynamic-dispatch demo: every vehicle can move. */
public class Vehicle {
    public void move() {
        System.out.println("Vehicles can move!");
    }
}

/** Overrides move(); the JVM selects this version at run time. */
class MotorBike extends Vehicle {
    @Override
    public void move() {
        System.out.println("MotorBike can move and accelerate too!");
    }
}

/** Demonstrates run-time (dynamic) binding through a Vehicle reference. */
class Test {
    public static void main(String[] args) {
        // Reference type is Vehicle, object type is MotorBike:
        // the overriding method is chosen at run time.
        Vehicle vh = new MotorBike();
        vh.move(); // prints MotorBike can move and accelerate too!

        // Now the referenced object really is a Vehicle.
        vh = new Vehicle();
        vh.move(); // prints Vehicles can move!
    }
}

It should be noted that in the first call to move(), the reference type is Vehicle and the object being referenced is MotorBike. So, when a call to move() is made, Java waits until runtime to determine which object is actually being pointed to by the reference.  In this case, the object is of the class MotorBike. So, the move() method of MotorBike class will be called. In the second call to move(), the object is of the class Vehicle. So, the move() method of Vehicle will be called.

More Info: http://www.javatpoint.com/static-binding-and-dynamic-binding
Info on typecasting: http://www.c4learn.com/java/java-type-casting-inheritance/

Java: Interfaces


/**
 * Contract for locale-specific phrases: each implementation supplies a
 * birthday wish and an everyday greeting in one particular language.
 */
public interface Language {
    /** Returns the everyday greeting in this language. */
    String getGreeting();

    /** Returns the birthday wish in this language. */
    String getBirthday();
}

public class Indonesian implements Language {
    public String getBirthday() {
        return "Selamat Ulang Tahun";
    }

    public String getGreeting() {
        return "Apa kabar?";
    }
}

public class English implements Language {
    public String getBirthday() {
        return "Happy Birthday";
    }

    public String getGreeting() {
        return "How are you?";
    }
}

/** Drives the Language demo: the same calls dispatch to different implementations. */
public class LanguageDemo {
    public static void main(String[] args) {
        printPhrases(new English());
        printPhrases(new Indonesian());
    }

    /** Prints the birthday wish followed by the greeting of the given language. */
    private static void printPhrases(Language language) {
        System.out.println(language.getBirthday());
        System.out.println(language.getGreeting());
    }
}



OUTPUT:
Happy Birthday
How are you?
Selamat Ulang Tahun
Apa kabar?


Java: Decompiling using Procyon

import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;

import java.nio.file.FileSystems;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;

import java.util.ArrayList;
import java.util.List;

public class ProcyonDecompiler {

    /** Root directory scanned recursively for *.jar files. */
    private static final String JAR_DIRECTORY = "C:\\ucm";
    /** Path to the Procyon decompiler jar spawned once per input jar. */
    private static final String DECOMPILER_FILEPATH = "lib/procyon-decompiler-0.5.29.jar";
    /** Jars discovered by walkDirectory(), then processed in main(). */
    private static final List<Path> fileList = new ArrayList<Path>();

    public static void main(String[] args) throws IOException, InterruptedException {
        walkDirectory();
        for (Path jar : fileList) {
            Path absolutePath = jar.toAbsolutePath();
            Path parentPath = jar.getParent();
            Path fileName = jar.getFileName();
            // Output directory is <parent>/<jar-name-without-extension>.
            executeJarFile(absolutePath.toString(), parentPath.toString(), trimFileExtension(fileName.toString()));
        }
    }

    /**
     * Runs the Procyon decompiler as a child JVM on one jar, echoing its output.
     *
     * @param absolutePath absolute path of the jar to decompile
     * @param parentPath   directory that will contain the output directory
     * @param filename     jar file name without extension (output directory name)
     * @throws IOException          if the process cannot be started or its output read
     * @throws InterruptedException if interrupted while waiting for the child to exit
     */
    private static void executeJarFile(String absolutePath, String parentPath, String filename) throws IOException,
                                                                                                       InterruptedException {
        // Command line: java -jar procyon.jar -jar <input> -o <parent>/<name>
        ProcessBuilder pb =
            new ProcessBuilder("java", "-jar", DECOMPILER_FILEPATH, "-jar", absolutePath, "-o",
                               parentPath + File.separator + filename);
        // BUG FIX: merge stderr into stdout — the original only drained stdout, so
        // a child writing enough to stderr would fill its pipe buffer and deadlock.
        pb.redirectErrorStream(true);
        Process p = pb.start();
        // Drain the child's output; try-with-resources closes the reader
        // (the original leaked it).
        try (BufferedReader in = new BufferedReader(new InputStreamReader(p.getInputStream()))) {
            String s;
            while ((s = in.readLine()) != null) {
                System.out.println(s);
            }
        }
        int status = p.waitFor();
        System.out.println("Exited with status: " + status);
    }

    /**
     * Strips the last file extension: "a.jar" -> "a". A name with no dot (or a
     * leading dot only, e.g. ".profile") is returned unchanged.
     */
    private static String trimFileExtension(String filename) {
        int pos = filename.lastIndexOf(".");
        if (pos > 0) {
            filename = filename.substring(0, pos);
        }
        return filename;
    }

    /**
     * Walks JAR_DIRECTORY recursively, collecting every *.jar file into fileList.
     *
     * @throws IOException if the directory tree cannot be traversed
     */
    public static void walkDirectory() throws IOException {
        Path start = FileSystems.getDefault().getPath(JAR_DIRECTORY);
        Files.walkFileTree(start, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                if (file.toString().endsWith(".jar")) {
                    fileList.add(file);
                }
                return FileVisitResult.CONTINUE;
            }
        });
    }
}

Java: Extract a JAR file

import java.io.IOException;

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;

import java.util.Enumeration;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;

public class UnzipJar {

    private static final String JAR_PATH = "D:\\test\\Java_gpl_v1.04.jar";

    /**
     * Extracts a jar file into a sibling directory named after the jar
     * (e.g. /x/foo.jar is extracted into /x/foo/). Does nothing if the jar
     * does not exist; existing extracted files are overwritten.
     *
     * @param jar path to the jar file to extract
     * @throws IOException if the jar cannot be read, an entry escapes the
     *                     destination directory, or a file cannot be written
     */
    public static void unzipJarFile(Path jar) throws IOException {
        if (!Files.exists(jar))
            return;

        String fnJar = jar.getFileName().toString();
        // BUG FIX: the original called substring(0, lastIndexOf(".jar")) blindly,
        // which throws for names without a ".jar" suffix; fall back to the full name.
        int pos = fnJar.lastIndexOf(".jar");
        String fn = (pos > 0) ? fnJar.substring(0, pos) : fnJar;
        System.out.println(fnJar + " " + fn);
        Path dst = jar.getParent().resolve(fn);
        // createDirectories (not createDirectory) so re-running over an existing
        // destination does not fail.
        Files.createDirectories(dst);

        // BUG FIX: try-with-resources — the original never closed the JarFile,
        // leaking a file handle on every call.
        try (JarFile jf = new JarFile(jar.toString())) {
            for (Enumeration<JarEntry> enums = jf.entries(); enums.hasMoreElements();) {
                JarEntry entry = enums.nextElement();
                Path target = dst.resolve(entry.getName()).normalize();
                // Zip-slip guard: reject entries (e.g. "../../evil") that would
                // resolve outside the destination directory.
                if (!target.startsWith(dst)) {
                    throw new IOException("Blocked path traversal entry: " + entry.getName());
                }
                if (entry.isDirectory()) {
                    Files.createDirectories(target);
                } else {
                    // Single pass: create the parent directory on demand so file
                    // entries work even when the jar lists no directory entries.
                    Files.createDirectories(target.getParent());
                    Files.copy(jf.getInputStream(entry), target, StandardCopyOption.REPLACE_EXISTING);
                }
            }
        }
    }

    public static void main(String[] args) throws IOException {
        unzipJarFile(Paths.get(JAR_PATH));
    }
}


Monday, June 22, 2015

SelectivelyRefineAndIndex Component

Component Information:
The ability to control the conversion and indexing of content is accessible through two resource includes defined in the component.

The first resource include is called "pre_submit_to_conversion". Before a content item is sent to the Inbound Refinery, this resource include is executed. Within this include, the administrator may manipulate the "dConversion" variable depending on the metadata of the document. Setting this variable to "PASSTHRU" will cause that content item to skip conversion, while setting it to "MultipageTiff" will cause that document to use the "MultipageTiff" conversion. Now, instead of merely relying upon file type to determine conversion settings, the administrator may use the value of any metadata fields.

Download from the link below:
http://www.oracle.com/technetwork/middleware/content-management/index-092832.html


I read this one note on metalink where the items are being checked in using Batchloader or IdcCommand. The files for these items are sent to the IBR for conversion. And the user wants that specific files that would normally be sent to the IBR be set for passthru and not be converted.

In the batchloader text file or hda file add the following parameters:

webViewableFile=<source file>.<source file extension>
webViewableFile:path=<source file path>/<source file>.<source file extension>
dWebExtension=<source file extension>

Example:
IdcService=CHECKIN_NEW
primaryFile=/tmp/AutoArchive.doc
dDocType=TEST
dDocTitle=TEST PASSTHRU 15
dSecurityGroup=Public
dDocAccount=Account1
dDocAuthor=pjolson
webViewableFile=AutoArchive.doc
webViewableFilePath=/tmp/AutoArchive.doc
dWebExtension=doc
xStorageRule=JDBC_Storage_Webless
<>

Note that this example is using a webless FSP rule. But even if designating the webviewable file, it still won't add it to the weblayout directory or the FileStorage table.

SortSpec when Configured for OracleTextSearch

When the Content Server is set for SearchIndexerEngineName=OracleTextSearch, only fields in SDATA sections can be used for sorting. Using SortSpec to sort the search results, the correct syntax is:
&SortSpec=<Field> <ASC or DESC>, <Field> <ASC or DESC>

Fields that are optimized as SDATA sections will require the prefix 'sd'. Two standard fields that are already optimized and will require the prefix are dDocName and dDocTitle.
&SortSpec=sddDocTitle ASC,sddDocName ASC,dInDate DESC

and this is how we will be passing the SortSpec key in the binder:
requestBinder.putLocal("SortSpec","sddDocName ASC, dInDate DESC");


To get the full list of SDATA sections
1. Go to UCM Administration --> Configuration. Note the Active Index value. It will be either ots1 or ots2.
2. On the Content Server database schema run the following spool script on the active index:
set long 2000000 
set pages 0 
set heading off 
set feedback off 
spool /tmp/outputfile.txt 
select ctx_report.create_index_script('<Active Index>') from dual; 
spool off

NOTE: The <Active Index> is the index the script will be run against. If the active index is ots1 the ctx_report.create_index_script will be run using FT_IDCTEXT1. If the active index is ots2 the report will be run using FT_IDCTEXT2.
Example: select ctx_report.create_index_script('FT_IDCTEXT1') from dual; 

After the spool completes edit the outputfile.txt file. Look for these entries: ctx_ddl.add_sdata_section. These are the fields that have been configured to be SDATA sections. These will include any fields that were set to be optimized in the Text Search Admin page. Also look for these entries: ctx_ddl.add_sdata_column. These also are SDATA sections. If the number of ctx_ddl.add_sdata_section and ctx_ddl.add_sdata_column entries equals 32 then no new fields can be set to be optimized.


To workaround this limit
Since the 32 limit cannot be increased, one way to reduce the number of SDATA section fields is to disable indexing on the custom metadata fields that won't be required to be searched on. In the ctx_ddl.add_sdata_section and ctx_ddl.add_sdata_column entries, these are the fields that will have an x or sdx prefix.

1. Go into Configuration Manager --> Information Fields
2. Select the field that isn't required for searches
3. Click the Edit button
4. Uncheck the Enable for Search Index box
5. Repeat the previous steps for the other unneeded fields
6. Rebuild the search index
7. Run the select ctx_report.create_index_script again and confirm that the field or fields are no longer marked as an SDATA section

NOTE: There is a fixed limit of 32 SDATA sections that can be present at one time. This limitation of Oracle Text has been solved as of database version 12.1.0.1.0.  This fix has also been backported to version 11.2.0.3.0.  It's also included in the 11.2.0.4.0 patchset for the database. See Note 1562142.1.
After upgrading or patching the database to raise this limit, there is another change needed for WCC to recognize the new limit.  See the Note:1607548.1

Friday, June 19, 2015

UCM: Running indexer from the command line

First make sure that the standalone applets are working. If they are not, check Note 1265076.1. Essentially, this involves first resetting the password of the sysadmin user, for example:
UPDATE USERS SET DPASSWORD='welcome1' WHERE DNAME='sysadmin';
UPDATE USERS SET DPASSWORDENCODING='' WHERE DNAME='sysadmin';

and then setting the JDBC connection in the System Properties application.

Create indexer.hda. This is the data that needs to be present in the file:
<?hda version="5.1.1 (build011203)" jcharset=Cp1252 encoding=iso-8859-1?>
#Full Collection Rebuild
@Properties LocalData
IdcService=CONTROL_SEARCH_INDEX
cycleID=rebuild
action=start
getStatus=1
fastRebuild=0
GetCurrentIndexingStatus=1
PerformProcessConversion=1
@end
<<EOF>>

Run the following command
IdcCommand -f C:\Work\indexer.hda -u sysadmin -l C:\Work\indexer.log

NOTE: To perform a "fast" rebuild instead of a "full" collection rebuild, set fastRebuild=1 in the HDA file

NOTE: If you receive Error: Executing 'CONTROL_SEARCH_INDEX command, then make the following entry in intradoc.cfg file
IdcCommandServerHost=10.141.107.1 or IdcCommandServerHost=localhost

UCM: ClassNames

Service
• This class can be specialized to override existing methods or add new methods. Service class includes methods for clearing local data, mapping result sets, renaming values, loading and validating values, caching shared tables, checking security, forcing login, getting and filling user data, loading metadata option lists, and refreshing cached data.

DocService
• This class can be specialized to override existing methods or add new methods. DocService includes methods for executing services of DocServiceHandler and computing document URLs.

MetaService
• This class can be specialized to override existing methods or add new methods. MetaService includes methods for updating meta data definitions, updating option lists, updating templates, and getting option lists.

DocHandlerFactory
• DocHandlerFactory can be implemented to create a customized version of DocServiceHandler. The class which implements DocHandlerFactory must be registered to DocService using the static service setHandlerFactory(). DocServiceHandler includes methods for checking in content, handling subscriptions, handling the format wizard, loading default information fields, validating information fields, updating and deleting content items.

PageHandlerService
• This class can be specialized to override existing methods or add new methods. PageHandlerService includes methods for performing report queries and outputting historical reports.

WorkflowDocImplementor
• This class can be specialized to augment the behavior of workflow handling. WorkflowDocImplementor includes methods for handling general and criteria workflow.

RevisionImplementor
• This class can be specialized to augment the behavior of major and minor revision control.

SecurityImplementor
• SecurityImplementor can be implemented to override current security handling. You can extend ServiceSecurityImplementor to augment the current behavior.

Migrate contents present in framework folders to another instance

Replicating the folder structure for Framework Folders from one server to another is accomplished by using Archiver to replicate the necessary tables.

In Archiver, if you just want to move the folder structure, you would export/import these tables:
FolderFolders
FolderMetaDefaults

If you want to move content also, you will also need FolderFiles table.

When configuring the Archive to export, include the Framework Folders tables:

  + navigate to the "Export Data" tab
  + click on the "Table" tab
  + click "Add" and include the following tables: FolderFolders, FolderFiles, FolderMetaDefaults

This should allow new folders & folder changes to be migrated/replicated.

NOTE: by default all files/content will be archived as well. If you only want to export the Folder structure then add a dummy condition to the Export Query under the "Content" tab (e.g. where Content ID is -1)

See the documentation here for steps on how to add a table to an archive: http://docs.oracle.com/cd/E23943_01/doc.1111/e10792/c08_migration.htm#CHDJAABJ






Since migrating folders implies exporting tables, you can specify an export query, where you can tell which folder you want to export. 
You can for example specify in the export query the fFolderGuid of 'Folder2' so that only that folder gets exported. If you export the files you would have to export the FolderFiles tables, and specify the fParentGuid with the ID of 'Folder2' (fFolderGuid). 





If you want to export all sub folders, you need to create a custom Query Expression, that recursively gets all sub folders. 

For example, to export all subfolders from FolderFolders, starting with the folder with the ID 6CCC155115BC5BF7D9EEA691B1EBB41F, use: 

fFolderGUID IN (select ff.fFolderGUID 
from folderfolders ff 
connect by prior ff.ffolderguid = ff.fparentguid 
start with ff.ffolderguid = '6CCC155115BC5BF7D9EEA691B1EBB41F') 

You can re-use the same custom Query expression for FolderMetaDefaults 

to export all files in a folder hierarchy, you need to use the query below instead: 

fParentGUID IN (select ff.fFolderGUID 
from folderfolders ff 
connect by prior ff.ffolderguid = ff.fparentguid 
start with ff.ffolderguid = '6CCC155115BC5BF7D9EEA691B1EBB41F') 

Text Extraction Process manually: textexport

The text extraction process is performed using the "textexport" program that comes bundled in the ContentAccess component. This component contains Oracle OutsideIn functionality. The textexport program reads the PDF file and places all of the extracted text into the active collection folder (ots1 or ots2) under
<ucm-install>/search/ots1/bulkload/~export

To preserve this file, open the Repository Manager applet, and on the Indexing tab, click the Configuration button. On the popup that displays, the debug level can be set to trace.

If you want to see the text extraction process, you need to run the textexport manually. Create an HDA testfile.hda file where the input file parameter will need to be set to a valid path:
<?hda version="10.1.3.5.1 (111229)" jcharset=UTF8 encoding=utf-8?>
@Properties LocalData
OutputCharacterSet=utf8
blFieldTypes=
FallbackFormat=fi_unicode
InputFilePath=C:\Users\sonal\Downloads\pdf.pdf
blDateFormat=M/d/yy {h:mm[:ss] {aa}[zzz]}!mAM,PM!tAmerica/Chicago
@end

Run the following command from the cmd:
C:\Oracle\Oracle_ECM1\oit\win32\lib\contentaccess\textexport.exe -c C:\testfile.hda -f C:\finaltextfile.txt

finaltextfile.txt will contain the extracted text from the pdf file mentioned in the HDA file.
fi_unicode: Display as text and assume the Unicode character set.

Thursday, June 18, 2015

UCM: Indexing

To configure UCM to place a .hcst file in the weblayout directory instead of a copy of the native file, set IndexVaultFile=true. This will work only when the file is a passthru file (didn't go through IBR). The .hcst file in the weblayout points to the vault file only.
IndexVaultFile=true
NOTE: IndexVaultFile=true was replaced with UseNativeFormatInIndex=true. Either of these configuration settings will force the indexer to index the native file.
NOTE: When using webless storage, use UseNativeFormatInIndex=true. IndexVaultFile=true should not be used at all.

If the above env variable is set as true, and still the user wants to allow some documents to be copied to the weblayout directory
IndexVaultExclusionWildcardFormats=*/hcs*|*/ttp|*/xsl|*/wml|*template*|*/jsp*|*/gif|*/png|*/pdf|*/doc*|*/msword|*/*ms-excel|text/plain


When a large file is being indexed, and textexport times out, you can increase the timeout. The default value is 15 seconds.
TextExtractorTimeoutInSec=60
IndexerTextExtractionGuardTimeout=60


UCM will not index files larger than 10485760(10 MB) by default unless the configuration entry MaxIndexableFileSize is set (in this example 20 MB). Setting this to 0 (zero) stops full text indexing but still allows use of Oracle Text Search. This is useful if you still need case insensitive searches but do not need full text indexing.
MaxIndexableFileSize=20971520


This parameter lists what formats will be text indexed. If a file format extension is not on the list, the textexport will not get invoked and it will be indexed as metadata only.
TextIndexerFilterFormats=pdf,msword,ms-word,doc*,ms-excel,xls*,ms-powerpoint,powerpoint,ppt*,rtf,xml,msg,zip

More information in depth: Doc ID 445871.1

Wednesday, June 10, 2015

UCM: Deleting contents or folders inside a framework folders

Use FLD_DELETE service to delete files or folders.
binder.putLocal("IdcService", "FLD_DELETE")

// Deleting a folder. Use any of the 2 ways below
  binder.putLocal("item1", "fFolderGUID:69A93E7E99FA46CC35CBCEA0E1B9F8DB")
  // binder.putLocal("item1", "path:/Enterprise Libraries/My Library/Folder2")

// Deleting a file. Use any of the 2 ways below
  binder.putLocal("item2", "fFileGUID:8F9E18BB9D8609A0E07F391C8A3737F4")
  //binder.putLocal("item2", "path:/Enterprise Libraries/My Library/Folder4/file1.txt")


UCM: Checkin new content inside a framework folder using RIDC


/** RIDC sample: checks a new content item into a Framework Folders folder. */
public class CheckinFrameworkRIDC {

    public static void main(String[] args) {
        IdcClientManager clientManager = new IdcClientManager();
        try {
            // Intradoc (idc) protocol alternative:
            // IdcClient idcClient = clientManager.createClient("idc://localhost:4444");
            // IdcContext userContext = new IdcContext("sysadmin");

            // Connect over HTTP to the Content Server idcplg endpoint.
            IdcClient idcClient = clientManager.createClient("http://localhost:16200/cs/idcplg");
            IdcContext userContext = new IdcContext("weblogic", "welcome1");

            HdaBinderSerializer serializer = new HdaBinderSerializer("UTF-8", idcClient.getDataFactory());

            // Standard CHECKIN_NEW metadata plus the primary file to upload.
            DataBinder requestBinder = idcClient.createBinder();
            requestBinder.putLocal("IdcService", "CHECKIN_NEW");
            requestBinder.putLocal("dDocTitle", "Framework Folder Testing");
            requestBinder.putLocal("dDocType", "Document");
            requestBinder.putLocal("dSecurityGroup", "Public");
            requestBinder.addFile("primaryFile", new File("C:\\samplefile.txt"));
            requestBinder.putLocal("doFileCopy", "true");
            requestBinder.putLocal("dDocAuthor", "weblogic");

            // Either fParentGUID or parentFolderPath/fParentPath identifies the
            // target folder. Metadata defaults are copied from the folder except
            // SecurityGroup and Account.
            // requestBinder.putLocal("fParentGUID", "5B0AC7C33BF951078772DFF757535B99");
            requestBinder.putLocal("fParentPath", "/Contribution Folders/ElPiju/Straw Bale/resources");

            // Dump the request, execute the service, then dump the response.
            serializer.serializeBinder(System.out, requestBinder);
            ServiceResponse response = idcClient.sendRequest(userContext, requestBinder);
            DataBinder responseBinder = response.getResponseAsBinder();
            serializer.serializeBinder(System.out, responseBinder);
        } catch (IdcClientException ice) {
            ice.printStackTrace();
        } catch (IOException ioe) {
            ioe.printStackTrace();
        }
    }
}


UCM: Framework Folders

When using the FrameworkFolders component, each folder is identified by its fFolderGUID and fParentGUID values. The fFolderGUID is the unique identifier for the folder, and the fParentGUID is set to the value of fFolderGUID for the folder's parent folder.



To list all the 2 contents under Cob folder:
SELECT ffileguid, ffilename, ddocname FROM folderfiles WHERE fparentguid='94B2FCB3A3D15A27E96B927CA80A3BD7';

To list all the 3 folders which are present under Cob folder:
SELECT ffolderguid, ffoldername FROM folderfolders WHERE fparentguid='94B2FCB3A3D15A27E96B927CA80A3BD7';

NOTE: A content item is not associated with fFolderGUID. It has only fParentGUID.