#!/usr/bin/env python3
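"""Parse a measurement log into a per-Id CSV report.

Extracts timestamped 'start', 'sent to measure', and 'measurement complete'
events per Id, computes the durations between them, writes a CSV summary,
and also emits a keyword-filtered intermediate copy of the log.
"""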

import re
import csv
from datetime import datetime
import argparse
import os

# Keywords used to filter lines into the intermediate log.
# (Placeholder values; substitute the real markers for your logs.)
KEYWORD1 = "TIMESTAMP1"
KEYWORD2 = "TIMESTAMP2"


def extract_timestamp(line):
    """Return the 'YYYY-MM-DD HH:MM:SS.ffffff+HH' timestamp in line, or None."""
    timestamp_pattern = r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{6}\+\d{2}'
    match = re.search(timestamp_pattern, line)
    return match.group(0) if match else None


def parse_log_line(line, verbose=False):
    """Parse one log line into a dict, or return None if it is irrelevant."""
    timestamp = extract_timestamp(line)
    if not timestamp:
        return None

    data = {'timestamp': timestamp}

    # Extract the Id, Pt, and Et fields, written as Id<123>, Pt<45>, Et<67>
    id_match = re.search(r'Id<(\d+)>', line)
    pt_match = re.search(r'Pt<(\d+)>', line)
    et_match = re.search(r'Et<(\d+)>', line)

    if not id_match:
        return None  # Skip lines without an Id

    data['Id'] = id_match.group(1)
    if pt_match:
        data['Pt'] = pt_match.group(1)
    if et_match:
        data['Et'] = et_match.group(1)

    # Categorize the line
    if pt_match and et_match:
        data['type'] = 'start'
    elif 'send to measure' in line:
        data['type'] = 'sent'
    elif 'measurement complete' in line:
        data['type'] = 'complete'
    else:
        return None  # Skip lines that don't match any of the three types

    if verbose:
        print("Parsed line:")
        print(f"  Timestamp: {data['timestamp']}")
        print(f"  Id: {data['Id']}")
        print(f"  Type: {data['type']}")
        if 'Pt' in data:
            print(f"  Pt: {data['Pt']}")
        if 'Et' in data:
            print(f"  Et: {data['Et']}")

    return data
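
# Hypothetical examples of lines the parser recognizes (wording inferred from
# the patterns above, not taken from a real log):
#   '2024-01-15 09:30:00.000000+02 Id<7> Pt<3> Et<12>'          -> type 'start'
#   '2024-01-15 09:30:01.500000+02 Id<7> send to measure'       -> type 'sent'
#   '2024-01-15 09:30:04.250000+02 Id<7> measurement complete'  -> type 'complete'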

def process_log(log_file, verbose=False):
    """Collect per-Id timestamps for the start, sent, and complete events."""
    data_dict = {}
    lines_processed = 0
    valid_lines = 0

    with open(log_file, 'r') as infile:
        for line in infile:
            lines_processed += 1
            parsed_data = parse_log_line(line, verbose)
            if parsed_data:
                valid_lines += 1
                id_num = parsed_data['Id']
                if id_num not in data_dict:
                    data_dict[id_num] = {'Pt': None, 'Et': None,
                                         'start': None, 'sent': None,
                                         'complete': None}

                line_type = parsed_data['type']
                data_dict[id_num][line_type] = parsed_data['timestamp']

                if line_type == 'start':
                    data_dict[id_num]['Pt'] = parsed_data.get('Pt')
                    data_dict[id_num]['Et'] = parsed_data.get('Et')

    if verbose:
        print("\nProcessing Summary:")
        print(f"Total lines processed: {lines_processed}")
        print(f"Valid lines for data processing: {valid_lines}")
        print(f"Unique Ids found: {len(data_dict)}")

    return data_dict


def create_intermediate_log(input_log_file, output_log_file, verbose=False):
    """
    Create an intermediate log file that only contains lines with specific keywords.

    Purpose:
    - Filter the input log file based on two predefined keywords.
    - Create a new log file with only the filtered lines.

    Expected Behavior:
    - Read the input log file line by line.
    - Check each line for the presence of either KEYWORD1 or KEYWORD2.
    - If a line contains either keyword, write it to the output log file.
    - Provide a summary of the filtering process if verbose mode is enabled.

    Args:
        input_log_file (str): Path to the input log file.
        output_log_file (str): Path to the output intermediate log file.
        verbose (bool): Whether to print detailed processing information.

    Returns:
        int: The number of lines written to the intermediate log file.
    """
    lines_processed = 0
    lines_written = 0

    with open(input_log_file, 'r') as infile, open(output_log_file, 'w') as outfile:
        for line in infile:
            lines_processed += 1
            if KEYWORD1 in line or KEYWORD2 in line:
                outfile.write(line)
                lines_written += 1

    if verbose:
        print("\nIntermediate Log Creation Summary:")
        print(f"Input file: {input_log_file}")
        print(f"Output file: {output_log_file}")
        print(f"Total lines processed: {lines_processed}")
        print(f"Lines written to intermediate log: {lines_written}")
        print(f"Filtering keywords: '{KEYWORD1}' and '{KEYWORD2}'")

    return lines_written


def parse_timestamp_str(ts):
    # The original format '%Y-%m-%d %H:%M:%S.%f+%H' reused %H for the zone
    # offset, which strptime rejects, and a bare '+HH' is too short for %z.
    # Strip the offset instead; every timestamp in one log shares it, so
    # computed durations are unaffected.
    return datetime.strptime(ts.split('+')[0], '%Y-%m-%d %H:%M:%S.%f')


def calculate_durations(data_dict):
    for id_num, data in data_dict.items():
        if data['start'] and data['sent']:
            start_time = parse_timestamp_str(data['start'])
            sent_time = parse_timestamp_str(data['sent'])
            data['sent_to_start'] = (sent_time - start_time).total_seconds()

        if data['start'] and data['complete']:
            start_time = parse_timestamp_str(data['start'])
            complete_time = parse_timestamp_str(data['complete'])
            data['total_duration'] = (complete_time - start_time).total_seconds()
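
# Worked example with hypothetical values: for
#   start = '2024-01-15 09:30:00.000000+02'
#   sent  = '2024-01-15 09:30:01.500000+02'
# sent_to_start comes out as 1.5 seconds.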

def write_csv(data_dict, output_file, verbose=False):
    with open(output_file, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(['Id', 'Pt', 'Et', 'Sent to Start (s)', 'Total Duration (s)'])

        for id_num, data in data_dict.items():
            row = [
                id_num,
                data.get('Pt', ''),
                data.get('Et', ''),
                # Missing durations are reported as 0 rather than left blank
                round(data.get('sent_to_start', 0), 3),
                round(data.get('total_duration', 0), 3)
            ]
            writer.writerow(row)

    if verbose:
        print("\nCSV Output:")
        print(f"File created: {output_file}")
        print(f"Total rows written: {len(data_dict)}")
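
# Illustrative CSV output for the sample events above (values hypothetical):
#   Id,Pt,Et,Sent to Start (s),Total Duration (s)
#   7,3,12,1.5,4.25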

def main(log_file, output_file, verbose=True):
    if verbose:
        print("Starting log processing")
        print(f"Input file: {log_file}")
        print(f"Output file: {output_file}")

    # Build the intermediate log file name from the output file name
    base_name = os.path.splitext(output_file)[0]
    intermediate_log_file = f"{base_name}_intermediate.log"

    # Create the intermediate log
    lines_filtered = create_intermediate_log(log_file, intermediate_log_file, verbose)

    # Process the original log for data extraction
    data_dict = process_log(log_file, verbose)
    calculate_durations(data_dict)
    write_csv(data_dict, output_file, verbose)

    print("\nProcessing complete.")
    print(f"Total unique Ids processed: {len(data_dict)}")
    print(f"Output written to: {output_file}")
    print(f"Intermediate log written to: {intermediate_log_file}")
    print(f"Lines in intermediate log: {lines_filtered}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Parse log file, generate CSV report, and create filtered intermediate log.")
    parser.add_argument("log_file", help="Path to the log file")
    parser.add_argument("-o", "--output", default="output.csv", help="Output CSV file name")
    parser.add_argument("-q", "--quiet", action="store_true", help="Disable verbose mode")
    args = parser.parse_args()

    main(args.log_file, args.output, not args.quiet)
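
# Example invocation (script and file names are illustrative):
#   python3 log_report.py measurements.log -o report.csv
# This writes report.csv and report_intermediate.log next to each other.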