Lots of trial and error together with GPT + Claude
import os
import random
import struct
import math
def calculate_entropy(data):
    # Shannon entropy (bits per byte) over the byte frequency distribution
    if not data:
        return 0
    prob = [float(data.count(x)) / len(data) for x in set(data)]
    return -sum(p * math.log2(p) for p in prob)
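
# A minimal alternative sketch: the same Shannon entropy via collections.Counter, which avoids
# the O(n * distinct) data.count() scan on large buffers. Equivalent result, just faster; kept
# here only as a reference implementation (the helper name is my own).
from collections import Counter

def calculate_entropy_fast(data):
    if not data:
        return 0
    counts = Counter(data)
    total = len(data)
    return -sum((c / total) * math.log2(c / total) for c in counts.values())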
def calculate_deviation(section_data):
    # Local stand-in for the YARA check:
    # math.deviation(section.raw_data_offset, section.raw_data_size, math.MEAN_BYTES)
    # (this computes the standard deviation around the section's own mean)
    section_mean = sum(section_data) / len(section_data)
    variance = sum((x - section_mean) ** 2 for x in section_data) / len(section_data)
    deviation = math.sqrt(variance)
    return deviation
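
# Note: if I read YARA's math module correctly, math.deviation(offset, size, math.MEAN_BYTES)
# is the *mean absolute* deviation of the bytes from 127.5 (math.MEAN_BYTES), not a standard
# deviation around the section's own mean. A minimal sketch of that reading, for comparison
# (an assumption about YARA's semantics, not verified against the exact YARA build in use):
def calculate_mad_from_mean_bytes(section_data, mean=127.5):
    # mean absolute deviation from a fixed mean
    return sum(abs(x - mean) for x in section_data) / len(section_data)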
def align(val, alignment):
    # Round val up to the next multiple of alignment
    return ((val + alignment - 1) // alignment) * alignment
def create_custom_binary(filename):
    global virtual_size
    binary_content = bytearray()
    # DOS Header
    binary_content.extend(b'MZ')
    binary_content.extend(b'\x00' * 58)
    binary_content.extend(struct.pack('<I', 0x80))
    # DOS Stub
    binary_content.extend(b'\x0E\x1F\xBA\x0E\x00\xB4\x09\xCD\x21\xB8\x01\x4C\xCD\x21')
    binary_content.extend(b'This program cannot be run in DOS mode.\r\r\n$')
    binary_content.extend(b'\x00' * (0x80 - len(binary_content)))
    # NT Headers
    nt_headers_offset = len(binary_content)
    binary_content.extend(b'PE\x00\x00')
    # File Header
    binary_content.extend(struct.pack('<HHIIIHH', 0x8664, 23, 0, 0, 0, 240, 0x22))
    # Optional Header
    image_base = 0x400000
    binary_content.extend(struct.pack('<HBBIIIIIIIIIHHHHHHIIIIHHIIIIII',
                                      0x20B, 14, 20, 0x22400, 0x7600, 0x0, 0x21860, 0x1000, 0x24000, 0x400000,
                                      0x1000, 0x1000, 4, 0, 0, 0, 4, 0, 0, 0, 0x1000, 0, 2, 0,
                                      0x100000, 0x1000, 0x100000, 0x1000, 0, 14))
    # Data Directories
    for _ in range(18):
        binary_content.extend(struct.pack('<II', 0, 0))
    # Reserve space for Section Headers
    section_header_offset = len(binary_content)
    binary_content.extend(b'\x00' * (40 * 23))  # 23 sections, 40 bytes each
    # Pad to the start of the first section
    binary_content.extend(b'\x00' * (0x400 - len(binary_content)))
    # Create sections
    section_names = [b'.text', b'.data', b'.rsrc', b'.rdata', b'.reloc', b'.fiesta'] + [f'.sec{i}'.encode() for i in range(6, 23)]
    sections = []
    virtual_address = 0x1000
    raw_address = 0x400
    section_data = b''
    for name in section_names:
        raw_size = 0x1000
        # Fill section data
        if name == b'.text':
            virtual_address = virtual_address + len(section_data)
            section_data = b"fiesta"
            section_data += bytes([0x68, 0x40, 0x30, 0x00, 0x00, 0x6A, 0x14, 0x8D, 0x91])  # fiesta3 pattern
            section_data += bytes([0x90] * 4) + b'\x68' + os.urandom(4) + b'\xC3'  # fiesta4 pattern
            section_data += os.urandom(raw_size - len(section_data))
            characteristics = 0x60000020  # CODE | EXECUTE | READ
            section = {'Name': name, 'VirtualSize': raw_size, 'VirtualAddress': virtual_address,
                       'SizeOfRawData': raw_size, 'PointerToRawData': len(binary_content),
                       'PointerToRelocations': 0,
                       'PointerToLinenumbers': 0, 'NumberOfRelocations': 0, 'NumberOfLinenumbers': 0,
                       'Characteristics': characteristics}
            sections.append(section)
            binary_content.extend(section_data)
        elif name == b'.data':
            virtual_address = virtual_address + len(section_data)
            version_info = (
                b'\x00\x00\x00\x00'  # Characteristics
                b'\x00\x00\x00\x00'  # TimeDateStamp
                b'\x00\x00'  # MajorVersion
                b'\x00\x00'  # MinorVersion
                b'\x01\x00'  # NumberOfNamedEntries
                b'\x01\x00'  # NumberOfIdEntries
                b'\x53\x00\x74\x00\x72\x00\x69\x00\x6E\x00\x67\x00\x46\x00\x69\x00'  # "StringFi"
                b'\x6C\x00\x65\x00\x49\x00\x6E\x00\x66\x00\x6F\x00\x00\x00'  # "leInfo"
                b'\x00\x00\x00\x00'  # ValueLength
                b'\x01\x00'  # Type
                b'\x43\x00\x6F\x00\x6D\x00\x70\x00\x61\x00\x6E\x00\x79\x00\x4E\x00'  # "CompanyN"
                b'\x61\x00\x6D\x00\x65\x00\x00\x00'  # "ame"
                b'\x46\x00\x69\x00\x65\x00\x73\x00\x74\x00\x61\x00\x00\x00'  # "Fiesta"
            )
            characteristics = 0x60000020  # CODE | EXECUTE | READ
            section = {'Name': name, 'VirtualSize': raw_size, 'VirtualAddress': virtual_address,
                       'SizeOfRawData': raw_size, 'PointerToRawData': len(binary_content),
                       'PointerToRelocations': 0,
                       'PointerToLinenumbers': 0, 'NumberOfRelocations': 0, 'NumberOfLinenumbers': 0,
                       'Characteristics': characteristics}
            sections.append(section)
            binary_content.extend(version_info)
        elif name == b'.fiesta':
            # Adjust data to meet the deviation requirement
            virtual_address = virtual_address + len(section_data)
            base_data = b'fiesta' * ((raw_size // len(b'fiesta')) - 1)
            section_data = bytearray(base_data)
            characteristics = 0xE0000060  # CODE | INITIALIZED_DATA | EXECUTE | READ | WRITE
            fiesta2_pattern = b'\x5F' + b'\xEE\xEE\xEE\xEE' + b'\x93\xEE\x31\x12\xEE\xEE\xEE\x2E\xE0' + b'\x00\x00'  # fiesta2 pattern
            # $fiesta2 shape: 5F, 4+ bytes, 93, 1 byte, 31 12, 2 bytes, a byte whose low nibble is not 1, 2?, ?0
            # (see the offline regex check sketched after the script body)
            section = {'Name': name, 'VirtualSize': raw_size, 'VirtualAddress': virtual_address,
                       'SizeOfRawData': raw_size, 'PointerToRawData': len(binary_content),
                       'PointerToRelocations': 0,
                       'PointerToLinenumbers': 0, 'NumberOfRelocations': 0, 'NumberOfLinenumbers': 0,
                       'Characteristics': characteristics}
            sections.append(section)
            # Nudge random bytes until the deviation lands in the target window
            while not (64.8 < calculate_deviation(section_data) < 64.9):  # section['PointerToRawData'], section['SizeOfRawData']
                print(calculate_deviation(section_data))
                random_index = random.randint(0, len(section_data) - 5)
                section_data[random_index] = random.randint(0, 255)
            # Stamp the patterns after tuning (note: this may shift the deviation again)
            section_data[0x2F: 0x2F + len(fiesta2_pattern)] = fiesta2_pattern
            fiesta1_pattern = b'\x66\x69\x65\x73\x74\x61\x00\x00'
            section_data[0x00: 0x00 + len(fiesta1_pattern)] = fiesta1_pattern
            print(section_data[0x2F: 0x2F + len(fiesta2_pattern)])
            print(section_data[0x00: 0x00 + len(fiesta1_pattern)])
            print(calculate_deviation(section_data))
            binary_content.extend(section_data)
        elif name == b'.rdata':
            virtual_address = virtual_address + len(section_data)
            characteristics = 0x40000040  # INITIALIZED_DATA | READ
            # .rdata section (Import Table)
            import_table = (
                b'\x00\x40\x00\x00'  # OriginalFirstThunk (RVA)
                b'\x00\x00\x00\x00'  # TimeDateStamp
                b'\x00\x00\x00\x00'  # ForwarderChain
                b'\x1C\x40\x00\x00'  # Name (RVA)
                b'\x00\x40\x00\x00'  # FirstThunk (RVA)
            ) * 2  # Two import descriptors (fiesta.dll and dummy.dll)
            import_table += b'\x00' * 20  # Null terminator
            # Import Lookup Table for fiesta.dll
            import_table += struct.pack('<Q', 0x4038) * 3  # 3 functions
            import_table += b'\x00' * 8  # Null terminator
            # Import Lookup Table for dummy.dll
            import_table += struct.pack('<Q', 0x4050) * 59  # 59 functions
            import_table += b'\x00' * 8  # Null terminator
            # DLL names
            import_table += b'fiesta.dll\x00'
            import_table += b'dummy.dll\x00'
            # Function names (Hint/Name Table)
            import_table += b'\x00\x00func1\x00\x00\x00func2\x00\x00\x00func3\x00'  # fiesta.dll functions
            for i in range(59):
                import_table += f'\x00\x00func{i + 1}\x00'.encode()  # dummy.dll functions
            """
            # Import Directory for fiesta.dll
            import_directory = struct.pack('<IIIIIIII', virtual_address, 0, 0, virtual_address + 0x14, 0, 0, 0, 0)
            import_directory += struct.pack('<IIIIIIII', virtual_address + 0x3C, 0, 0, virtual_address + 0x50, virtual_address + 0x60, 0, 0, 0)
            import_directory += b'\x00' * 0x14
            # Import Thunk Table for fiesta.dll (3 functions)
            import_directory += b'fiesta.dll\x00'
            import_directory += struct.pack('<QQQ', virtual_address + 0x70, virtual_address + 0x78,
                                            virtual_address + 0x80)
            import_directory += b'\x00' * 0x18  # padding
            import_directory += b'func1\x00func2\x00func3\x00'  # Only 3 functions from fiesta.dll
            # Add additional imports to reach 62 imported functions
            other_imports = b''
            for i in range(4, 63):  # Importing 59 additional functions from other DLLs
                dll_name = f'lib{i}.dll\x00'.encode()  # Placeholder DLL names
                other_imports += dll_name + b''.join([f'func{i}\x00'.encode() for i in range(1, 2)])
            # Concatenate everything and pad to fit the raw size
            section_data = import_directory + other_imports + b'\x00' * (
                raw_size - len(import_directory) - len(other_imports))
            """
            section = {'Name': name, 'VirtualSize': raw_size, 'VirtualAddress': virtual_address,
                       'SizeOfRawData': raw_size, 'PointerToRawData': len(binary_content),
                       'PointerToRelocations': 0,
                       'PointerToLinenumbers': 0, 'NumberOfRelocations': 0, 'NumberOfLinenumbers': 0,
                       'Characteristics': characteristics}
            sections.append(section)
            binary_content.extend(import_table)
        elif name == b'.rsrc':
            virtual_address = virtual_address + len(section_data)
            characteristics = 0x40000040  # INITIALIZED_DATA | READ
            resource_data = (
                b'\x00\x00\x00\x00'  # Characteristics
                b'\x00\x00\x00\x00'  # TimeDateStamp
                b'\x04\x00'  # MajorVersion
                b'\x00\x00'  # MinorVersion
                b'\x00\x00'  # NumberOfNamedEntries
                b'\x01\x00'  # NumberOfIdEntries
                b'\x01\x00\x00\x00'  # Type (RT_CURSOR)
                b'\x80\x00\x00\x00'  # Offset to directory
            )
            section = {'Name': name, 'VirtualSize': raw_size, 'VirtualAddress': virtual_address,
                       'SizeOfRawData': raw_size, 'PointerToRawData': len(binary_content),
                       'PointerToRelocations': 0, 'PointerToLinenumbers': 0, 'NumberOfRelocations': 0, 'NumberOfLinenumbers': 0,
                       'Characteristics': characteristics}
            resource_data += os.urandom(raw_size - len(resource_data))
            section_data = resource_data
            sections.append(section)
            binary_content.extend(section_data)
        else:
            virtual_address = virtual_address + len(section_data)
            section_data = os.urandom(raw_size)
            # characteristics = 0xC0000040  # INITIALIZED_DATA | READ | WRITE
            # characteristics = 0x42000040  # INITIALIZED_DATA | DISCARDABLE | READ
            characteristics = 0xE0000060  # CODE | INITIALIZED_DATA | EXECUTE | READ | WRITE
            section = {'Name': name, 'VirtualSize': raw_size, 'VirtualAddress': virtual_address,
                       'SizeOfRawData': raw_size, 'PointerToRawData': len(binary_content),
                       'PointerToRelocations': 0,
                       'PointerToLinenumbers': 0, 'NumberOfRelocations': 0, 'NumberOfLinenumbers': 0,
                       'Characteristics': characteristics}
            sections.append(section)
            binary_content.extend(section_data)
        virtual_size = len(section_data)
        raw_address = len(binary_content)
        raw_address += align(raw_size, raw_size)
    # Write Section Headers
    for i, section in enumerate(sections):
        offset = section_header_offset + i * 40
        binary_content[offset:offset + 8] = section['Name'].ljust(8, b'\x00')
        binary_content[offset + 8:offset + 40] = struct.pack('<IIIIIIII',
                                                             section['VirtualSize'],
                                                             section['VirtualAddress'],
                                                             section['SizeOfRawData'],
                                                             section['PointerToRawData'],
                                                             section['PointerToRelocations'],
                                                             section['PointerToLinenumbers'],
                                                             section['NumberOfRelocations'] | (section['NumberOfLinenumbers'] << 16),
                                                             section['Characteristics'])
    # Ensure entropy
    entropy_value = calculate_entropy(binary_content)
    print(f"Initial entropy: {entropy_value}")
    while entropy_value <= 6:
        start_offset = sections[22]['VirtualAddress'] - sections[10]['VirtualAddress']
        end_offset = sections[22]['VirtualAddress']
        binary_content[start_offset:end_offset] = os.urandom(0x1000 * 12)
        entropy_value = calculate_entropy(binary_content)
        print(f"Updated entropy: {entropy_value}")
    # Update file header fields
    file_size = len(binary_content)
    binary_content[0x3C:0x40] = struct.pack('<I', 0x80)  # e_lfanew
    binary_content[nt_headers_offset + 0x50:nt_headers_offset + 0x54] = struct.pack('<I', virtual_address)  # SizeOfImage
    # Write the content to the binary file
    with open(filename, 'wb') as f:
        f.write(binary_content)
    print(f"Created custom binary: {filename}")
    print(f"File size: {file_size} bytes")

# Usage
create_custom_binary('custom_fiesta2.exe')
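
# Offline sanity check for the handcrafted $fiesta2 bytes. This is my own rough translation of
# the hex pattern { 5F [4-] 93 ?? 31 12 [2] ~?1 2? ?0 } into a bytes regex, not YARA's matcher,
# so treat it as an approximation; the helper and variable names below are mine.
import re

def _nibble_class(high=None, low=None):
    # Character class of bytes whose high/low nibble equals the given value (None = any)
    vals = bytes(b for b in range(256)
                 if (high is None or b >> 4 == high) and (low is None or b & 0xF == low))
    return b'[' + re.escape(vals) + b']'

fiesta2_re = re.compile(
    b'\x5F' + b'[\x00-\xff]{4,}' + b'\x93' + b'[\x00-\xff]' + b'\x31\x12' + b'[\x00-\xff]{2}'
    + b'[^' + re.escape(bytes(range(0x01, 0x100, 0x10))) + b']'   # ~?1: low nibble != 1
    + _nibble_class(high=0x2)                                     # 2?
    + _nibble_class(low=0x0)                                      # ?0
)

_fiesta2_bytes = b'\x5F' + b'\xEE\xEE\xEE\xEE' + b'\x93\xEE\x31\x12\xEE\xEE\xEE\x2E\xE0' + b'\x00\x00'
print("fiesta2 pattern self-check:", bool(fiesta2_re.search(_fiesta2_bytes)))  # expected: True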
print("=================================================")
import yara
# Define the YARA rule (with console.log debug output) as a string
fiesta_rule = """
import "pe"
import "console"
import "math"
import "hash"

rule fiesta_rule {
    meta:
        description = "Let's go!"
        author = "fiesta"
        date = "22/08/2024"
        version = "1.0"
    strings:
        $fiesta = "fiesta" nocase wide ascii
        $fiesta2 = { 5F [4-] 93 ?? 31 12 [2] ~?1 2? ?0 }
        $fiesta3 = { 68 40 30 00 00 6A 14 8D 91 }
        $fiesta4 = { 90 90 90 90 68 ?? ?? ?? ?? C3 }
    condition:
        uint16(0) == 0x5A4D and
        ( any of ($fiesta3*) or console.log("Failed: $fiesta3 condition") ) and
        ( math.entropy(0, filesize) > 6 or console.log("Failed: entropy check") ) and
        ( pe.is_32bit() == 0 or console.log("Failed: pe.is_32bit() check") ) and
        ( pe.version_info["CompanyName"] == "Fiesta" or console.log("Failed: CompanyName check") ) and
        ( pe.number_of_imported_functions == 62 or console.log("Failed: number_of_imported_functions check") ) and
        ( $fiesta in ((pe.sections[pe.section_index(".fiesta")].raw_data_offset) ..
                      (pe.sections[pe.section_index(".fiesta")].raw_data_offset + pe.sections[pe.section_index(".fiesta")].raw_data_size))
          or console.log("Failed: $fiesta location check") ) and
        ( pe.imports("fiesta.dll") == 3 or console.log("Failed: imports check") ) and
        ( pe.number_of_resources == 1 or console.log("Failed: number_of_resources check") ) and
        ( pe.number_of_sections == 23 or console.log("Failed: number_of_sections check") ) and
        ( $fiesta4 or console.log("Failed: $fiesta4 check") ) and
        (
            for any section in pe.sections : (
                section.name == ".fiesta" and
                (math.deviation(section.raw_data_offset, section.raw_data_size, math.MEAN_BYTES) > 64.8 and
                 math.deviation(section.raw_data_offset, section.raw_data_size, math.MEAN_BYTES) < 64.9 or console.log("Failed: deviation check")) and
                ($fiesta2 at section.raw_data_offset + 0x2f or console.log("Failed: $fiesta2 location check"))
            ) or
            console.log("Failed: .fiesta section condition")
        ) or
        ( hash.md5(0, filesize) == "33baf1c19ca30dac4617dbab5f375efd" or console.log("Failed: hash.md5 check") )
}
"""
def check_file_with_yara(file_path):
    try:
        # Compile the YARA rule
        compiled_rule = yara.compile(source=fiesta_rule)
        # Scan the file
        matches = compiled_rule.match(file_path)
        if matches:
            print(f"File {file_path} matches the YARA rule!")
            for match in matches:
                print(f"Match details: {match}")
        else:
            print(f"File {file_path} does not match the YARA rule.")
    except yara.Error as e:
        print(f"Error during YARA scanning: {e}")

# Call the function to check the file with the YARA rule
check_file_with_yara('custom_fiesta2.exe')
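
# Optional: a small sketch for seeing which strings actually hit. This assumes a recent
# yara-python (4.x), where Match.strings yields StringMatch objects; older versions return
# (offset, identifier, data) tuples instead, so the print below stays deliberately generic.
# The helper name is mine.
def dump_string_matches(file_path):
    compiled_rule = yara.compile(source=fiesta_rule)
    for match in compiled_rule.match(file_path):
        for s in match.strings:
            print(f"{match.rule}: {s}")

# dump_string_matches('custom_fiesta2.exe')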