#69 Add the implementation of the debugging feature.
yruslan committed Mar 19, 2020
1 parent a395c9b commit 12f87c4
Showing 8 changed files with 1,465 additions and 324 deletions.
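When debugging is enabled, the copybook parser emits, next to each primitive field, a companion `<field>_debug` field that REDEFINES the original and decodes the same raw bytes as a HEX string. A hedged usage sketch from the Spark side — the `"debug"` option name is an assumption, since the option wiring would live in files of this commit that are not rendered below:

```scala
// Hedged sketch, NOT taken from this diff: assumes spark-cobol exposes the
// feature via a "debug" read option.
val df = spark.read
  .format("cobol")
  .option("copybook", "/path/to/copybook.cpy")
  .option("debug", "true")   // emit a <field>_debug HEX column per primitive field
  .load("/path/to/data")

df.printSchema()
// root
//  |-- NAME: string           (decoded value)
//  |-- NAME_debug: string     (same bytes as hex, e.g. "D1D6C8D5" for EBCDIC "JOHN")
```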
CopybookParser.scala (za.co.absa.cobrix.cobol.parser)
@@ -24,9 +24,9 @@ import za.co.absa.cobrix.cobol.parser.ast.datatype.{AlphaNumeric, Integral}
import za.co.absa.cobrix.cobol.parser.ast.{BinaryProperties, Group, Primitive, Statement}
import za.co.absa.cobrix.cobol.parser.common.Constants
import za.co.absa.cobrix.cobol.parser.decoders.FloatingPointFormat.FloatingPointFormat
-import za.co.absa.cobrix.cobol.parser.decoders.{DecoderSelector, FloatingPointFormat}
+import za.co.absa.cobrix.cobol.parser.decoders.{DecoderSelector, FloatingPointFormat, StringDecoders}
import za.co.absa.cobrix.cobol.parser.encoding.codepage.{CodePage, CodePageCommon}
-import za.co.absa.cobrix.cobol.parser.encoding.{EBCDIC, Encoding}
+import za.co.absa.cobrix.cobol.parser.encoding.{EBCDIC, Encoding, HEX}
import za.co.absa.cobrix.cobol.parser.exceptions.SyntaxErrorException
import za.co.absa.cobrix.cobol.parser.policies.StringTrimmingPolicy.StringTrimmingPolicy
import za.co.absa.cobrix.cobol.parser.policies.{CommentPolicy, StringTrimmingPolicy}
@@ -136,14 +136,14 @@ object CopybookParser {
val nonTerms: Set[String] = (for (id <- nonTerminals)
yield transformIdentifier(id)
).toSet

val correctedFieldParentMap = transformIdentifierMap(fieldParentMap)
validateFieldParentMap(correctedFieldParentMap)

new Copybook(
if (dropGroupFillers) {
-addDebugFields(
-calculateNonFillerSizes(
+calculateNonFillerSizes(
+addDebugFields(
setSegmentParents(
markSegmentRedefines(
processGroupFillers(
@@ -152,12 +152,12 @@
calculateBinaryProperties(schemaANTLR), nonTerms, enc, stringTrimmingPolicy, ebcdicCodePage, asciiCharset, isUtf16BigEndian, floatingPointFormat)
)
), segmentRedefines), correctedFieldParentMap
-)
-), isDebug
+), isDebug
+)
)
} else {
-addDebugFields(
-calculateNonFillerSizes(
+calculateNonFillerSizes(
+addDebugFields(
setSegmentParents(
markSegmentRedefines(
renameGroupFillers(
@@ -166,8 +166,8 @@
calculateBinaryProperties(schemaANTLR), nonTerms, enc, stringTrimmingPolicy, ebcdicCodePage, asciiCharset, isUtf16BigEndian, floatingPointFormat)
)
), segmentRedefines), correctedFieldParentMap
-)
-), isDebug
+), isDebug
+)
)
}
)
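In both branches the nesting of the two outer passes is swapped: addDebugFields now runs inside calculateNonFillerSizes, so field sizes are computed after the `_debug` REDEFINES fields have been inserted. A hedged sketch of the new pass order, with argument lists abbreviated:

```scala
// Innermost call runs first; the segment/filler passes are elided here.
val ast =
  calculateNonFillerSizes(   // runs last: sizes now cover the injected *_debug fields
    addDebugFields(          // runs first: inserts <field>_debug REDEFINES entries
      /* markSegmentRedefines / setSegmentParents / filler passes */ schema,
      isDebug
    )
  )
```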
@@ -183,7 +183,7 @@
): CopybookAST = {

def getNonTerminalName(name: String, parent: Group): String = {
-val existingNames = parent.children.map{
+val existingNames = parent.children.map {
case x: Primitive => x.name
case x: Group => x.name
}
@@ -198,10 +198,10 @@
}

val newChildren: ArrayBuffer[Statement] = new ArrayBuffer[Statement]()
-for(stmt <- copybook.children) {
+for (stmt <- copybook.children) {
stmt match {
case s: Primitive => newChildren.append(s)
-case g: Group => {
+case g: Group =>
if (nonTerminals contains g.name) {
newChildren.append(
addNonTerminals(g, nonTerminals, enc, stringTrimmingPolicy, ebcdicCodePage, asciiCharset, isUtf16BigEndian, floatingPointFormat).copy(isRedefined = true)(g.parent)
@@ -224,7 +224,6 @@
newChildren.append(
addNonTerminals(g, nonTerminals, enc, stringTrimmingPolicy, ebcdicCodePage, asciiCharset, isUtf16BigEndian, floatingPointFormat)
)
-}
}
}
copybook.copy(children = newChildren)(copybook.parent)
@@ -269,12 +268,11 @@

val childWithSizes = child match {
case group: Group => calculateSchemaSizes(group)
-case st: Primitive => {
+case st: Primitive =>
val size = st.getBinarySizeBytes
val sizeAllOccurs = size * st.arrayMaxSize
val binProps = BinaryProperties(st.binaryProperties.offset, size, sizeAllOccurs)
st.withUpdatedBinaryProperties(binProps)
-}
}
redefinedSizes += childWithSizes.binaryProperties.actualSize
redefinedNames += childWithSizes.name.toUpperCase
@@ -434,7 +432,7 @@
case p: Primitive =>
ensureSegmentRedefinesAreIneGroup(p.name, isCurrentFieldASegmentRedefine = false)
p
-case g: Group => {
+case g: Group =>
if (isOneOfSegmentRedefines(g)) {
if (foundRedefines.contains(g.name)) {
throw new IllegalStateException(s"Duplicate segment redefine field '${g.name}' found.")
@@ -446,7 +444,6 @@
ensureSegmentRedefinesAreIneGroup(g.name, isCurrentFieldASegmentRedefine = false)
g
}
-}
}
group.copy(children = childrenWithSegmentRedefines)(group.parent)
}
@@ -756,7 +753,42 @@
* @return The same AST with debugging fields added
*/
private def addDebugFields(ast: CopybookAST, addDebuggingFields: Boolean): CopybookAST = {
-ast
+def getDebugField(field: Primitive): Primitive = {
+  val size = field.binaryProperties.dataSize
+  val debugFieldName = field.name + "_debug"
+  val debugDataType = AlphaNumeric(s"X($size)", size, None, Some(HEX), None)
+
+  val debugField = field.copy(name = debugFieldName,
+    dataType = debugDataType,
+    redefines = Some(field.name),
+    decode = StringDecoders.decodeHex) (parent = field.parent)
+
+  debugField
+}
+
+def processGroup(group: Group): Group = {
+  val newChildren = ArrayBuffer[Statement]()
+  group.children.foreach {
+    case grp: Group =>
+      val newGrp = processGroup(grp)
+      newChildren += newGrp
+    case st: Primitive =>
+      if (st.redefines.isDefined) {
+        newChildren += st
+      } else {
+        newChildren += st.withUpdatedIsRedefined(newIsRedefined = true)
+        newChildren += getDebugField(st)
+      }
+  }
+  group.withUpdatedChildren(newChildren)
+}
+
+if (addDebuggingFields) {
+  processGroup(ast)
+} else {
+  ast
+}
}

/**
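This addDebugFields implementation is the core of the feature: every primitive field that does not itself redefine another field is marked isRedefined and followed by a `<name>_debug` sibling typed as `X(n)` with the new HEX encoding. A self-contained sketch of the same transformation on a toy schema model — these are illustrative classes, not the Cobrix AST:

```scala
import scala.collection.mutable.ArrayBuffer

// Toy schema model, just to illustrate the shape of the transformation.
sealed trait Stmt
case class Field(name: String, sizeBytes: Int, redefines: Option[String] = None,
                 isRedefined: Boolean = false) extends Stmt
case class Grp(name: String, children: Seq[Stmt]) extends Stmt

def addDebug(g: Grp): Grp = {
  val newChildren = ArrayBuffer[Stmt]()
  g.children.foreach {
    case grp: Grp                           => newChildren += addDebug(grp)
    case f: Field if f.redefines.isDefined  => newChildren += f  // REDEFINES fields are left alone
    case f: Field =>
      newChildren += f.copy(isRedefined = true)
      newChildren += Field(f.name + "_debug", f.sizeBytes, redefines = Some(f.name))
  }
  Grp(g.name, newChildren.toSeq)
}

// Grp("RECORD", Seq(Field("NAME", 5))) becomes
// Grp("RECORD", Seq(Field("NAME", 5, isRedefined = true),
//                   Field("NAME_debug", 5, redefines = Some("NAME"))))
```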
DecoderSelector.scala (za.co.absa.cobrix.cobol.parser.decoders)
@@ -21,8 +21,8 @@ import java.nio.charset.{Charset, StandardCharsets}
import za.co.absa.cobrix.cobol.parser.ast.datatype.{AlphaNumeric, COMP1, COMP2, COMP3, COMP4, COMP5, COMP9, CobolType, Decimal, Integral, Usage}
import za.co.absa.cobrix.cobol.parser.common.Constants
import za.co.absa.cobrix.cobol.parser.decoders.FloatingPointFormat.FloatingPointFormat
+import za.co.absa.cobrix.cobol.parser.encoding._
import za.co.absa.cobrix.cobol.parser.encoding.codepage.{CodePage, CodePageCommon}
-import za.co.absa.cobrix.cobol.parser.encoding.{ASCII, EBCDIC, Encoding, UTF16}
import za.co.absa.cobrix.cobol.parser.position.Position

import scala.util.control.NonFatal
@@ -84,6 +84,8 @@ object DecoderSelector {
}
case UTF16 =>
StringDecoders.decodeUtf16String(_, getStringStrimmingType(stringTrimmingPolicy), isUtf16BigEndian)
+case HEX =>
+  StringDecoders.decodeHex
}
}

StringDecoders.scala (za.co.absa.cobrix.cobol.parser.decoders)
@@ -30,6 +30,9 @@ object StringDecoders {
val TrimRight = 3
val TrimBoth = 4

+// Characters used for HEX conversion
+private val HEX_ARRAY = "0123456789ABCDEF".toCharArray
+
/**
* A decoder for any EBCDIC string fields (alphabetical or any char)
*
@@ -110,6 +113,24 @@
}
}

+/**
+  * A decoder for representing bytes as hex strings
+  *
+  * @param bytes A byte array that represents the binary data
+  * @return A HEX string representation of the binary data
+  */
+def decodeHex(bytes: Array[Byte]): String = {
+  val hexChars = new Array[Char](bytes.length * 2)
+  var i = 0
+  while (i < bytes.length) {
+    val v = bytes(i) & 0xFF
+    hexChars(i * 2) = HEX_ARRAY(v >>> 4)
+    hexChars(i * 2 + 1) = HEX_ARRAY(v & 0x0F)
+    i += 1
+  }
+  new String(hexChars)
+}
+
/**
* A decoder for any EBCDIC uncompressed numbers supporting
* <ul>
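For example, the two EBCDIC bytes 0xF1 0xC1 (the characters '1' and 'A') decode to the string "F1C1":

```scala
// Quick check of the decoder added above:
StringDecoders.decodeHex(Array(0xF1.toByte, 0xC1.toByte))  // returns "F1C1"
StringDecoders.decodeHex(Array[Byte]())                    // returns ""
```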
Encoding.scala (za.co.absa.cobrix.cobol.parser.encoding)
@@ -25,3 +25,5 @@ case object EBCDIC extends Encoding
case object ASCII extends Encoding

case object UTF16 extends Encoding
+
+case object HEX extends Encoding
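HEX is not a real copybook encoding; it is an internal marker the parser attaches to the generated `_debug` fields so that DecoderSelector (above) routes them to StringDecoders.decodeHex. A hedged sketch of that dispatch, with the non-HEX branches collapsed into a placeholder:

```scala
// Simplified; the real selector also handles trimming policies, code pages,
// and charsets. Assumes the za.co.absa.cobrix.cobol.parser imports are in scope.
def selectDecoder(encoding: Encoding): Array[Byte] => Any = encoding match {
  case HEX => StringDecoders.decodeHex   // debug fields: raw bytes as hex text
  case _   => ???                        // EBCDIC / ASCII / UTF16 branches elided
}
```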
120 changes: 20 additions & 100 deletions data/test24_expected/test24.txt

Large diffs are not rendered by default.

