Invoice Generator

 pip install reportlab


from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas

def generate_invoice(client_name, items, tax_rate=0.05, filename="invoice.pdf"):
    c = canvas.Canvas(filename, pagesize=letter)
    width, height = letter

    # Title
    c.setFont("Helvetica-Bold", 16)
    c.drawString(50, height - 50, "INVOICE")

    # Client Details
    c.setFont("Helvetica", 12)
    c.drawString(50, height - 80, f"Client: {client_name}")

    # Table Headers
    c.setFont("Helvetica-Bold", 12)
    c.drawString(50, height - 120, "Item")
    c.drawString(250, height - 120, "Quantity")
    c.drawString(350, height - 120, "Price")
    c.drawString(450, height - 120, "Total")

    # Invoice Items
    c.setFont("Helvetica", 12)
    y_position = height - 140
    total_cost = 0

    for item, details in items.items():
        quantity, price = details
        total = quantity * price
        total_cost += total

        c.drawString(50, y_position, item)
        c.drawString(250, y_position, str(quantity))
        c.drawString(350, y_position, f"${price:.2f}")
        c.drawString(450, y_position, f"${total:.2f}")
        y_position -= 20

    # Tax and Total Calculation
    tax_amount = total_cost * tax_rate
    grand_total = total_cost + tax_amount

    c.setFont("Helvetica-Bold", 12)
    c.drawString(350, y_position - 20, "Subtotal:")
    c.drawString(450, y_position - 20, f"${total_cost:.2f}")

    c.drawString(350, y_position - 40, f"Tax ({tax_rate * 100:.1f}%):")
    c.drawString(450, y_position - 40, f"${tax_amount:.2f}")

    c.drawString(350, y_position - 60, "Grand Total:")
    c.drawString(450, y_position - 60, f"${grand_total:.2f}")

    # Save PDF
    c.save()
    print(f"āœ… Invoice saved as {filename}")

PDF to Audio Converter

 pip install PyMuPDF gTTS


import fitz  # PyMuPDF
from gtts import gTTS
import tkinter as tk
from tkinter import filedialog, messagebox
import os

# Function to extract text from PDF
def extract_text_from_pdf(pdf_path):
    doc = fitz.open(pdf_path)
    text = ""
    for page in doc:
        text += page.get_text("text") + "\n"
    return text

# Function to convert text to speech
def convert_text_to_audio(text, output_file):
    if text.strip():
        tts = gTTS(text=text, lang='en')
        tts.save(output_file)
        messagebox.showinfo("Success", f"Audio file saved as {output_file}")
        os.system(f"start {output_file}")  # Opens the audio file
    else:
        messagebox.showwarning("Warning", "No text found in PDF.")

# Function to open file dialog
def select_pdf():
    file_path = filedialog.askopenfilename(filetypes=[("PDF Files", "*.pdf")])
    if file_path:
        text = extract_text_from_pdf(file_path)
        if text:
            convert_text_to_audio(text, "output_audio.mp3")

# Tkinter GUI Setup
root = tk.Tk()
root.title("PDF to Audio Converter šŸ”Š")
root.geometry("400x200")

lbl_title = tk.Label(root, text="šŸ“„ PDF to Audio Converter šŸ”Š", font=("Arial", 14, "bold"))
lbl_title.pack(pady=10)

btn_select_pdf = tk.Button(root, text="Select PDF & Convert", command=select_pdf, font=("Arial", 12))
btn_select_pdf.pack(pady=20)

root.mainloop()
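
The os.system("start ...") call above only opens the MP3 on Windows. A cross-platform variant (a small sketch using only the standard library; the helper name open_file is my own) could replace that line:

import os
import platform
import subprocess

def open_file(path):
    # Hand the file to the OS default media player
    system = platform.system()
    if system == "Windows":
        os.startfile(path)
    elif system == "Darwin":  # macOS
        subprocess.run(["open", path])
    else:  # Linux and other POSIX desktops
        subprocess.run(["xdg-open", path])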

Network Speed Monitor

 pip install speedtest-cli matplotlib


import speedtest
import tkinter as tk
from tkinter import ttk
import threading
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg

# Function to test internet speed
def test_speed():
    st = speedtest.Speedtest()
    st.get_best_server()
    
    download_speed = round(st.download() / 1_000_000, 2)  # Convert to Mbps
    upload_speed = round(st.upload() / 1_000_000, 2)  # Convert to Mbps

    return download_speed, upload_speed

# Function that runs speed tests in a background thread and posts the
# results back to the Tk main loop (Tkinter widgets are not thread-safe,
# so label and graph updates are scheduled with root.after)
def update_speed():
    while True:
        download, upload = test_speed()
        root.after(0, show_result, download, upload)

# Function to apply a new measurement to the labels and the graph (main thread)
def show_result(download, upload):
    speeds_download.append(download)
    speeds_upload.append(upload)

    lbl_download.config(text=f"Download Speed: {download} Mbps")
    lbl_upload.config(text=f"Upload Speed: {upload} Mbps")

    update_graph()

# Function to update the speed graph
def update_graph():
    ax.clear()
    ax.plot(speeds_download, label="Download Speed (Mbps)", color="blue")
    ax.plot(speeds_upload, label="Upload Speed (Mbps)", color="red")
    
    ax.set_title("Network Speed Over Time")
    ax.set_xlabel("Test Count")
    ax.set_ylabel("Speed (Mbps)")
    ax.legend()
    ax.grid()

    canvas.draw()

# Tkinter GUI setup
root = tk.Tk()
root.title("Network Speed Monitor šŸŒ")
root.geometry("500x500")

lbl_title = tk.Label(root, text="šŸ“” Network Speed Monitor", font=("Arial", 14, "bold"))
lbl_title.pack(pady=10)

lbl_download = tk.Label(root, text="Download Speed: -- Mbps", font=("Arial", 12))
lbl_download.pack(pady=5)

lbl_upload = tk.Label(root, text="Upload Speed: -- Mbps", font=("Arial", 12))
lbl_upload.pack(pady=5)

# Matplotlib graph setup
fig, ax = plt.subplots(figsize=(5, 3))
speeds_download = []
speeds_upload = []

canvas = FigureCanvasTkAgg(fig, master=root)
canvas.get_tk_widget().pack()

# Run speed test in a separate thread
threading.Thread(target=update_speed, daemon=True).start()

root.mainloop()
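
If you only need the numbers without a GUI, a minimal command-line check with the same speedtest-cli API (a short sketch; ping is read from the results object populated by get_best_server) looks like this:

import speedtest

st = speedtest.Speedtest()
st.get_best_server()
print(f"Ping: {st.results.ping:.0f} ms")
print(f"Download: {st.download() / 1_000_000:.2f} Mbps")
print(f"Upload: {st.upload() / 1_000_000:.2f} Mbps")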

Code Syntax Highlighter

 pip install pygments


from pygments import highlight
from pygments.lexers import guess_lexer, PythonLexer, JavascriptLexer, CLexer
from pygments.formatters import TerminalFormatter, HtmlFormatter

# Function to highlight code in terminal
def highlight_code_terminal(code, language="python"):
    # Choose lexer based on language
    lexer = {
        "python": PythonLexer(),
        "javascript": JavascriptLexer(),
        "c": CLexer(),
    }.get(language.lower(), guess_lexer(code))

    highlighted_code = highlight(code, lexer, TerminalFormatter())
    return highlighted_code


# Function to highlight code and save as HTML
def highlight_code_html(code, language="python", output_file="highlighted_code.html"):
    lexer = {
        "python": PythonLexer(),
        "javascript": JavascriptLexer(),
        "c": CLexer(),
    }.get(language.lower(), guess_lexer(code))

    formatter = HtmlFormatter(full=True, style="monokai")
    highlighted_code = highlight(code, lexer, formatter)

    # Save to an HTML file
    with open(output_file, "w", encoding="utf-8") as f:
        f.write(highlighted_code)
    
    print(f"āœ… Highlighted code saved to {output_file}")


# Example usage
if __name__ == "__main__":
    code_sample = """
    def greet(name):
        print(f"Hello, {name}!")
    
    greet("abc")
    """

    print("šŸŽØ Terminal Highlighted Code:")
    print(highlight_code_terminal(code_sample, "python"))

    highlight_code_html(code_sample, "python")
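
To highlight a file from disk instead of an inline string, Pygments can also pick a lexer based on the filename; a short sketch (the path in the commented call is a placeholder):

from pygments import highlight
from pygments.lexers import guess_lexer_for_filename
from pygments.formatters import TerminalFormatter

def highlight_file_terminal(path):
    # Read the source file and let Pygments choose a lexer from its name and content
    with open(path, encoding="utf-8") as f:
        code = f.read()
    lexer = guess_lexer_for_filename(path, code)
    return highlight(code, lexer, TerminalFormatter())

# print(highlight_file_terminal("example.py"))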


Resume Parser & Analyzer

 pip install spacy pdfminer.six python-docx pandas nltk
 python -m spacy download en_core_web_sm


import re
import spacy
import pdfminer.high_level
import docx
import nltk
from collections import Counter

nltk.download("stopwords")
nltk.download("punkt")  # tokenizer models needed by nltk.word_tokenize in match_skills
from nltk.corpus import stopwords

# Load spaCy NLP model
nlp = spacy.load("en_core_web_sm")

# Function to extract text from PDF
def extract_text_from_pdf(pdf_path):
    return pdfminer.high_level.extract_text(pdf_path)

# Function to extract text from DOCX
def extract_text_from_docx(docx_path):
    doc = docx.Document(docx_path)
    return "\n".join([para.text for para in doc.paragraphs])

# Function to extract email from text
def extract_email(text):
    email_pattern = r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}"
    emails = re.findall(email_pattern, text)
    return emails[0] if emails else None

# Function to extract phone number from text
def extract_phone(text):
    phone_pattern = r"\(?\d{3}\)?[-.\s]?\d{3}[-.\s]?\d{4}"
    phones = re.findall(phone_pattern, text)
    return phones[0] if phones else None

# Function to extract skills from text
def extract_skills(text):
    skills_list = ["Python", "Java", "C++", "Machine Learning", "Data Science", "SQL", "Django", "React", "Flask"]
    found_skills = [skill for skill in skills_list if skill.lower() in text.lower()]
    return found_skills

# Function to extract name using NLP
def extract_name(text):
    doc = nlp(text)
    for ent in doc.ents:
        if ent.label_ == "PERSON":
            return ent.text
    return None

# Function to match skills with a job description
def match_skills(resume_skills, job_description):
    job_tokens = nltk.word_tokenize(job_description.lower())
    stop_words = set(stopwords.words("english"))
    # Rejoin the filtered tokens so multi-word skills like "Machine Learning" can still match
    filtered_job_text = " ".join(word for word in job_tokens if word not in stop_words)

    skill_match_count = sum(1 for skill in resume_skills if skill.lower() in filtered_job_text)
    match_percentage = (skill_match_count / len(resume_skills)) * 100 if resume_skills else 0
    return round(match_percentage, 2)

# Main function
def analyze_resume(file_path, job_description):
    # Extract text
    text = extract_text_from_pdf(file_path) if file_path.endswith(".pdf") else extract_text_from_docx(file_path)

    # Extract details
    name = extract_name(text)
    email = extract_email(text)
    phone = extract_phone(text)
    skills = extract_skills(text)
    match_percentage = match_skills(skills, job_description)

    # Display results
    print("\nšŸ“„ Resume Analysis Results:")
    print(f"šŸ‘¤ Name: {name}")
    print(f"šŸ“§ Email: {email}")
    print(f"šŸ“ž Phone: {phone}")
    print(f"šŸ›  Skills: {', '.join(skills)}")
    print(f"āœ… Skill Match with Job: {match_percentage}%")

    return {"name": name, "email": email, "phone": phone, "skills": skills, "match_percentage": match_percentage}